
TensorFlow.js API

A WebGL accelerated, browser based JavaScript library for training and deploying ML models. This page documents the TensorFlow.js API for version 4.22.0; the library and the optional WebGPU and WASM backend bundles are loaded from the jsDelivr CDN (https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@4.22.0, @tensorflow/tfjs-backend-webgpu@4.22.0, and @tensorflow/tfjs-backend-wasm@4.22.0).
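The WebGPU and WASM bundles register optional backends; below is a minimal sketch (not from the original page) of switching the active backend once those scripts have loaded, using only functions documented under Backends in this reference:

```js
// Pick a backend after the optional backend bundles have loaded.
// 'webgpu' and 'wasm' are only available when the corresponding scripts are included;
// 'webgl' and 'cpu' ship with the core bundle.
(async () => {
  await tf.setBackend('webgpu');          // or 'wasm', 'webgl', 'cpu'
  await tf.ready();                       // wait for the backend to initialize
  console.log('Active backend:', tf.getBackend());
})();
```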
Try TensorFlow.js right in your browser. The snippet below is the example behind the "Try TensorFlow.js!" button; open the browser console to see the output.

```js
// Define a model for linear regression.
const model = tf.sequential();
model.add(tf.layers.dense({units: 1, inputShape: [1]}));

// Prepare the model for training: Specify the loss and the optimizer.
model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});

// Generate some synthetic data for training.
const xs = tf.tensor2d([1, 2, 3, 4], [4, 1]);
const ys = tf.tensor2d([1, 3, 5, 7], [4, 1]);

// Train the model using the data.
model.fit(xs, ys).then(() => {
  // Use the model to do inference on a data point the model hasn't seen before:
  // Open the browser devtools to see the output
  model.predict(tf.tensor2d([5], [1, 1])).print();
});
```
> 1.2.8 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.2.7 data-link=../../api/1.2.7/ > 1.2.7 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.2.6 data-link=../../api/1.2.6/ > 1.2.6 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.2.5 data-link=../../api/1.2.5/ > 1.2.5 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.1.2 data-link=../../api/1.1.2/ > 1.1.2 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.1.0 data-link=../../api/1.1.0/ > 1.1.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.0.4 data-link=../../api/1.0.4/ > 1.0.4 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=1.0.0 data-link=../../api/1.0.0/ > 1.0.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.15.3 data-link=../../api/0.15.3/ > 0.15.3 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.15.1 data-link=../../api/0.15.1/ > 0.15.1 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.14.2 data-link=../../api/0.14.2/ > 0.14.2 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.14.1 data-link=../../api/0.14.1/ > 0.14.1 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.13.3 data-link=../../api/0.13.3/ > 0.13.3 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.13.0 data-link=../../api/0.13.0/ > 0.13.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.12.5 data-link=../../api/0.12.5/ > 0.12.5 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.12.0 data-link=../../api/0.12.0/ > 0.12.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.11.7 data-link=../../api/0.11.7/ > 0.11.7 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.11.6 data-link=../../api/0.11.6/ > 0.11.6 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.11.2 data-link=../../api/0.11.2/ > 0.11.2 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.11.1 data-link=../../api/0.11.1/ > 0.11.1 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.10.0 data-link=../../api/0.10.0/ > 0.10.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.9.0 data-link=../../api/0.9.0/ > 0.9.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.8.0 data-link=../../api/0.8.0/ > 0.8.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.7.0 data-link=../../api/0.7.0/ > 0.7.0 </li> <li class="mdc-list-item" role="option" tabindex="0" data-version=0.6.1 data-link=../../api/0.6.1/ > 0.6.1 </li> </ul> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Tensors">Tensors</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Creation">Creation</a> </div> <div class="symbol"> <a href="#tensor">tf.tensor</a> </div> <div class="symbol"> <a href="#scalar">tf.scalar</a> </div> <div class="symbol"> <a href="#tensor1d">tf.tensor1d</a> </div> <div class="symbol"> <a href="#tensor2d">tf.tensor2d</a> </div> <div class="symbol"> <a href="#tensor3d">tf.tensor3d</a> </div> <div class="symbol"> <a href="#tensor4d">tf.tensor4d</a> </div> <div class="symbol"> <a href="#tensor5d">tf.tensor5d</a> </div> <div class="symbol"> <a href="#tensor6d">tf.tensor6d</a> </div> <div class="symbol"> <a 
href="#buffer">tf.buffer</a> </div> <div class="symbol"> <a href="#clone">tf.clone</a> </div> <div class="symbol"> <a href="#complex">tf.complex</a> </div> <div class="symbol"> <a href="#diag">tf.diag</a> </div> <div class="symbol"> <a href="#eye">tf.eye</a> </div> <div class="symbol"> <a href="#fill">tf.fill</a> </div> <div class="symbol"> <a href="#imag">tf.imag</a> </div> <div class="symbol"> <a href="#linspace">tf.linspace</a> </div> <div class="symbol"> <a href="#oneHot">tf.oneHot</a> </div> <div class="symbol"> <a href="#ones">tf.ones</a> </div> <div class="symbol"> <a href="#onesLike">tf.onesLike</a> </div> <div class="symbol"> <a href="#print">tf.print</a> </div> <div class="symbol"> <a href="#range">tf.range</a> </div> <div class="symbol"> <a href="#real">tf.real</a> </div> <div class="symbol"> <a href="#truncatedNormal">tf.truncatedNormal</a> </div> <div class="symbol"> <a href="#variable">tf.variable</a> </div> <div class="symbol"> <a href="#zeros">tf.zeros</a> </div> <div class="symbol"> <a href="#zerosLike">tf.zerosLike</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:Tensor">tf.Tensor</a> <div class="method-list"> <div class="method"> <a href="#tf.Tensor.buffer">.buffer</a> </div> <div class="method"> <a href="#tf.Tensor.bufferSync">.bufferSync</a> </div> <div class="method"> <a href="#tf.Tensor.array">.array</a> </div> <div class="method"> <a href="#tf.Tensor.arraySync">.arraySync</a> </div> <div class="method"> <a href="#tf.Tensor.data">.data</a> </div> <div class="method"> <a href="#tf.Tensor.dataToGPU">.dataToGPU</a> </div> <div class="method"> <a href="#tf.Tensor.dataSync">.dataSync</a> </div> <div class="method"> <a href="#tf.Tensor.dispose">.dispose</a> </div> <div class="method"> <a href="#tf.Tensor.print">.print</a> </div> <div class="method"> <a href="#tf.Tensor.clone">.clone</a> </div> <div class="method"> <a href="#tf.Tensor.toString">.toString</a> </div> </div> </div> <div class="symbol"> <a href="#class:Variable">tf.Variable</a> <div class="method-list"> <div class="method"> <a href="#tf.Variable.assign">.assign</a> </div> </div> </div> <div class="symbol"> <a href="#class:TensorBuffer">tf.TensorBuffer</a> <div class="method-list"> <div class="method"> <a href="#tf.TensorBuffer.set">.set</a> </div> <div class="method"> <a href="#tf.TensorBuffer.get">.get</a> </div> <div class="method"> <a href="#tf.TensorBuffer.toTensor">.toTensor</a> </div> </div> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Transformations">Transformations</a> </div> <div class="symbol"> <a href="#batchToSpaceND">tf.batchToSpaceND</a> </div> <div class="symbol"> <a href="#broadcastArgs">tf.broadcastArgs</a> </div> <div class="symbol"> <a href="#broadcastTo">tf.broadcastTo</a> </div> <div class="symbol"> <a href="#cast">tf.cast</a> </div> <div class="symbol"> <a href="#depthToSpace">tf.depthToSpace</a> </div> <div class="symbol"> <a href="#ensureShape">tf.ensureShape</a> </div> <div class="symbol"> <a href="#expandDims">tf.expandDims</a> </div> <div class="symbol"> <a href="#mirrorPad">tf.mirrorPad</a> </div> <div class="symbol"> <a href="#pad">tf.pad</a> </div> <div class="symbol"> <a href="#reshape">tf.reshape</a> </div> <div class="symbol"> <a href="#setdiff1dAsync">tf.setdiff1dAsync</a> </div> <div class="symbol"> <a href="#spaceToBatchND">tf.spaceToBatchND</a> </div> <div class="symbol"> <a 
href="#squeeze">tf.squeeze</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Slicing and Joining">Slicing and Joining</a> </div> <div class="symbol"> <a href="#booleanMaskAsync">tf.booleanMaskAsync</a> </div> <div class="symbol"> <a href="#concat">tf.concat</a> </div> <div class="symbol"> <a href="#gather">tf.gather</a> </div> <div class="symbol"> <a href="#reverse">tf.reverse</a> </div> <div class="symbol"> <a href="#slice">tf.slice</a> </div> <div class="symbol"> <a href="#split">tf.split</a> </div> <div class="symbol"> <a href="#stack">tf.stack</a> </div> <div class="symbol"> <a href="#tile">tf.tile</a> </div> <div class="symbol"> <a href="#unstack">tf.unstack</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Matrices">Matrices</a> </div> <div class="symbol"> <a href="#einsum">tf.einsum</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Tensors-Random">Random</a> </div> <div class="symbol"> <a href="#multinomial">tf.multinomial</a> </div> <div class="symbol"> <a href="#rand">tf.rand</a> </div> <div class="symbol"> <a href="#randomGamma">tf.randomGamma</a> </div> <div class="symbol"> <a href="#randomNormal">tf.randomNormal</a> </div> <div class="symbol"> <a href="#randomStandardNormal">tf.randomStandardNormal</a> </div> <div class="symbol"> <a href="#randomUniform">tf.randomUniform</a> </div> <div class="symbol"> <a href="#randomUniformInt">tf.randomUniformInt</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Models">Models</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Creation">Creation</a> </div> <div class="symbol"> <a href="#sequential">tf.sequential</a> </div> <div class="symbol"> <a href="#model">tf.model</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Inputs">Inputs</a> </div> <div class="symbol"> <a href="#input">tf.input</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Loading">Loading</a> </div> <div class="symbol"> <a href="#loadGraphModel">tf.loadGraphModel</a> </div> <div class="symbol"> <a href="#loadLayersModel">tf.loadLayersModel</a> </div> <div class="symbol"> <a href="#io.browserDownloads">tf.io.browserDownloads</a> </div> <div class="symbol"> <a href="#io.browserFiles">tf.io.browserFiles</a> </div> <div class="symbol"> <a href="#io.http">tf.io.http</a> </div> <div class="symbol"> <a href="#loadGraphModelSync">tf.loadGraphModelSync</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Management">Management</a> </div> <div class="symbol"> <a href="#io.copyModel">tf.io.copyModel</a> </div> <div class="symbol"> <a href="#io.listModels">tf.io.listModels</a> </div> <div class="symbol"> <a href="#io.moveModel">tf.io.moveModel</a> </div> <div class="symbol"> <a href="#io.removeModel">tf.io.removeModel</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Serialization">Serialization</a> </div> <div class="symbol"> <a href="#registerClass">tf.registerClass</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:Functional">tf.Functional</a> <div class="method-list"> </div> </div> <div 
class="symbol"> <a href="#class:GraphModel">tf.GraphModel</a> <div class="method-list"> <div class="method"> <a href="#tf.GraphModel.loadSync">.loadSync</a> </div> <div class="method"> <a href="#tf.GraphModel.save">.save</a> </div> <div class="method"> <a href="#tf.GraphModel.predict">.predict</a> </div> <div class="method"> <a href="#tf.GraphModel.predictAsync">.predictAsync</a> </div> <div class="method"> <a href="#tf.GraphModel.execute">.execute</a> </div> <div class="method"> <a href="#tf.GraphModel.executeAsync">.executeAsync</a> </div> <div class="method"> <a href="#tf.GraphModel.getIntermediateTensors">.getIntermediateTensors</a> </div> <div class="method"> <a href="#tf.GraphModel.disposeIntermediateTensors">.disposeIntermediateTensors</a> </div> <div class="method"> <a href="#tf.GraphModel.dispose">.dispose</a> </div> </div> </div> <div class="symbol"> <a href="#class:LayersModel">tf.LayersModel</a> <div class="method-list"> <div class="method"> <a href="#tf.LayersModel.summary">.summary</a> </div> <div class="method"> <a href="#tf.LayersModel.compile">.compile</a> </div> <div class="method"> <a href="#tf.LayersModel.evaluate">.evaluate</a> </div> <div class="method"> <a href="#tf.LayersModel.evaluateDataset">.evaluateDataset</a> </div> <div class="method"> <a href="#tf.LayersModel.predict">.predict</a> </div> <div class="method"> <a href="#tf.LayersModel.predictOnBatch">.predictOnBatch</a> </div> <div class="method"> <a href="#tf.LayersModel.fit">.fit</a> </div> <div class="method"> <a href="#tf.LayersModel.fitDataset">.fitDataset</a> </div> <div class="method"> <a href="#tf.LayersModel.trainOnBatch">.trainOnBatch</a> </div> <div class="method"> <a href="#tf.LayersModel.save">.save</a> </div> <div class="method"> <a href="#tf.LayersModel.getLayer">.getLayer</a> </div> <div class="method"> <a href="#tf.LayersModel.getLayer">.getLayer</a> </div> <div class="method"> <a href="#tf.LayersModel.getLayer">.getLayer</a> </div> <div class="method"> <a href="#tf.LayersModel.getLayer">.getLayer</a> </div> </div> </div> <div class="symbol"> <a href="#class:Sequential">tf.Sequential</a> <div class="method-list"> <div class="method"> <a href="#tf.Sequential.add">.add</a> </div> </div> </div> <div class="symbol"> <a href="#class:SymbolicTensor">tf.SymbolicTensor</a> <div class="method-list"> </div> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Models-Op Registry">Op Registry</a> </div> <div class="symbol"> <a href="#deregisterOp">tf.deregisterOp</a> </div> <div class="symbol"> <a href="#getRegisteredOp">tf.getRegisteredOp</a> </div> <div class="symbol"> <a href="#registerOp">tf.registerOp</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Layers">Layers</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Advanced Activation">Advanced Activation</a> </div> <div class="symbol"> <a href="#layers.elu">tf.layers.elu</a> </div> <div class="symbol"> <a href="#layers.leakyReLU">tf.layers.leakyReLU</a> </div> <div class="symbol"> <a href="#layers.prelu">tf.layers.prelu</a> </div> <div class="symbol"> <a href="#layers.reLU">tf.layers.reLU</a> </div> <div class="symbol"> <a href="#layers.softmax">tf.layers.softmax</a> </div> <div class="symbol"> <a href="#layers.thresholdedReLU">tf.layers.thresholdedReLU</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Basic">Basic</a> </div> <div class="symbol"> <a 
href="#layers.activation">tf.layers.activation</a> </div> <div class="symbol"> <a href="#layers.dense">tf.layers.dense</a> </div> <div class="symbol"> <a href="#layers.dropout">tf.layers.dropout</a> </div> <div class="symbol"> <a href="#layers.embedding">tf.layers.embedding</a> </div> <div class="symbol"> <a href="#layers.flatten">tf.layers.flatten</a> </div> <div class="symbol"> <a href="#layers.permute">tf.layers.permute</a> </div> <div class="symbol"> <a href="#layers.repeatVector">tf.layers.repeatVector</a> </div> <div class="symbol"> <a href="#layers.reshape">tf.layers.reshape</a> </div> <div class="symbol"> <a href="#layers.spatialDropout1d">tf.layers.spatialDropout1d</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Convolutional">Convolutional</a> </div> <div class="symbol"> <a href="#layers.conv1d">tf.layers.conv1d</a> </div> <div class="symbol"> <a href="#layers.conv2d">tf.layers.conv2d</a> </div> <div class="symbol"> <a href="#layers.conv2dTranspose">tf.layers.conv2dTranspose</a> </div> <div class="symbol"> <a href="#layers.conv3d">tf.layers.conv3d</a> </div> <div class="symbol"> <a href="#layers.cropping2D">tf.layers.cropping2D</a> </div> <div class="symbol"> <a href="#layers.depthwiseConv2d">tf.layers.depthwiseConv2d</a> </div> <div class="symbol"> <a href="#layers.separableConv2d">tf.layers.separableConv2d</a> </div> <div class="symbol"> <a href="#layers.upSampling2d">tf.layers.upSampling2d</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Merge">Merge</a> </div> <div class="symbol"> <a href="#layers.add">tf.layers.add</a> </div> <div class="symbol"> <a href="#layers.average">tf.layers.average</a> </div> <div class="symbol"> <a href="#layers.concatenate">tf.layers.concatenate</a> </div> <div class="symbol"> <a href="#layers.dot">tf.layers.dot</a> </div> <div class="symbol"> <a href="#layers.maximum">tf.layers.maximum</a> </div> <div class="symbol"> <a href="#layers.minimum">tf.layers.minimum</a> </div> <div class="symbol"> <a href="#layers.multiply">tf.layers.multiply</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Normalization">Normalization</a> </div> <div class="symbol"> <a href="#layers.batchNormalization">tf.layers.batchNormalization</a> </div> <div class="symbol"> <a href="#layers.layerNormalization">tf.layers.layerNormalization</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Pooling">Pooling</a> </div> <div class="symbol"> <a href="#layers.averagePooling1d">tf.layers.averagePooling1d</a> </div> <div class="symbol"> <a href="#layers.averagePooling2d">tf.layers.averagePooling2d</a> </div> <div class="symbol"> <a href="#layers.averagePooling3d">tf.layers.averagePooling3d</a> </div> <div class="symbol"> <a href="#layers.globalAveragePooling1d">tf.layers.globalAveragePooling1d</a> </div> <div class="symbol"> <a href="#layers.globalAveragePooling2d">tf.layers.globalAveragePooling2d</a> </div> <div class="symbol"> <a href="#layers.globalMaxPooling1d">tf.layers.globalMaxPooling1d</a> </div> <div class="symbol"> <a href="#layers.globalMaxPooling2d">tf.layers.globalMaxPooling2d</a> </div> <div class="symbol"> <a href="#layers.maxPooling1d">tf.layers.maxPooling1d</a> </div> <div class="symbol"> <a href="#layers.maxPooling2d">tf.layers.maxPooling2d</a> </div> <div class="symbol"> <a href="#layers.maxPooling3d">tf.layers.maxPooling3d</a> </div> </div> <div 
class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Recurrent">Recurrent</a> </div> <div class="symbol"> <a href="#layers.convLstm2d">tf.layers.convLstm2d</a> </div> <div class="symbol"> <a href="#layers.convLstm2dCell">tf.layers.convLstm2dCell</a> </div> <div class="symbol"> <a href="#layers.gru">tf.layers.gru</a> </div> <div class="symbol"> <a href="#layers.gruCell">tf.layers.gruCell</a> </div> <div class="symbol"> <a href="#layers.lstm">tf.layers.lstm</a> </div> <div class="symbol"> <a href="#layers.lstmCell">tf.layers.lstmCell</a> </div> <div class="symbol"> <a href="#layers.rnn">tf.layers.rnn</a> </div> <div class="symbol"> <a href="#layers.simpleRNN">tf.layers.simpleRNN</a> </div> <div class="symbol"> <a href="#layers.simpleRNNCell">tf.layers.simpleRNNCell</a> </div> <div class="symbol"> <a href="#layers.stackedRNNCells">tf.layers.stackedRNNCells</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Wrapper">Wrapper</a> </div> <div class="symbol"> <a href="#layers.bidirectional">tf.layers.bidirectional</a> </div> <div class="symbol"> <a href="#layers.timeDistributed">tf.layers.timeDistributed</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:layers.Layer">tf.layers.Layer</a> <div class="method-list"> <div class="method"> <a href="#tf.layers.Layer.apply">.apply</a> </div> <div class="method"> <a href="#tf.layers.Layer.countParams">.countParams</a> </div> <div class="method"> <a href="#tf.layers.Layer.build">.build</a> </div> <div class="method"> <a href="#tf.layers.Layer.getWeights">.getWeights</a> </div> <div class="method"> <a href="#tf.layers.Layer.setWeights">.setWeights</a> </div> <div class="method"> <a href="#tf.layers.Layer.addWeight">.addWeight</a> </div> <div class="method"> <a href="#tf.layers.Layer.addLoss">.addLoss</a> </div> <div class="method"> <a href="#tf.layers.Layer.computeOutputShape">.computeOutputShape</a> </div> <div class="method"> <a href="#tf.layers.Layer.getConfig">.getConfig</a> </div> <div class="method"> <a href="#tf.layers.Layer.dispose">.dispose</a> </div> </div> </div> <div class="symbol"> <a href="#class:RNNCell">tf.RNNCell</a> <div class="method-list"> </div> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Inputs">Inputs</a> </div> <div class="symbol"> <a href="#layers.inputLayer">tf.layers.inputLayer</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Padding">Padding</a> </div> <div class="symbol"> <a href="#layers.zeroPadding2d">tf.layers.zeroPadding2d</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Noise">Noise</a> </div> <div class="symbol"> <a href="#layers.alphaDropout">tf.layers.alphaDropout</a> </div> <div class="symbol"> <a href="#layers.gaussianDropout">tf.layers.gaussianDropout</a> </div> <div class="symbol"> <a href="#layers.gaussianNoise">tf.layers.gaussianNoise</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Mask">Mask</a> </div> <div class="symbol"> <a href="#layers.masking">tf.layers.masking</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Rescaling">Rescaling</a> </div> <div class="symbol"> <a href="#layers.rescaling">tf.layers.rescaling</a> </div> </div> <div 
class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-CenterCrop">CenterCrop</a> </div> <div class="symbol"> <a href="#layers.centerCrop">tf.layers.centerCrop</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-Resizing">Resizing</a> </div> <div class="symbol"> <a href="#layers.resizing">tf.layers.resizing</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-CategoryEncoding">CategoryEncoding</a> </div> <div class="symbol"> <a href="#layers.categoryEncoding">tf.layers.categoryEncoding</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Layers-RandomWidth">RandomWidth</a> </div> <div class="symbol"> <a href="#layers.randomWidth">tf.layers.randomWidth</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Operations">Operations</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Arithmetic">Arithmetic</a> </div> <div class="symbol"> <a href="#add">tf.add</a> </div> <div class="symbol"> <a href="#sub">tf.sub</a> </div> <div class="symbol"> <a href="#mul">tf.mul</a> </div> <div class="symbol"> <a href="#div">tf.div</a> </div> <div class="symbol"> <a href="#addN">tf.addN</a> </div> <div class="symbol"> <a href="#divNoNan">tf.divNoNan</a> </div> <div class="symbol"> <a href="#floorDiv">tf.floorDiv</a> </div> <div class="symbol"> <a href="#maximum">tf.maximum</a> </div> <div class="symbol"> <a href="#minimum">tf.minimum</a> </div> <div class="symbol"> <a href="#mod">tf.mod</a> </div> <div class="symbol"> <a href="#pow">tf.pow</a> </div> <div class="symbol"> <a href="#squaredDifference">tf.squaredDifference</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Basic math">Basic math</a> </div> <div class="symbol"> <a href="#abs">tf.abs</a> </div> <div class="symbol"> <a href="#acos">tf.acos</a> </div> <div class="symbol"> <a href="#acosh">tf.acosh</a> </div> <div class="symbol"> <a href="#asin">tf.asin</a> </div> <div class="symbol"> <a href="#asinh">tf.asinh</a> </div> <div class="symbol"> <a href="#atan">tf.atan</a> </div> <div class="symbol"> <a href="#atan2">tf.atan2</a> </div> <div class="symbol"> <a href="#atanh">tf.atanh</a> </div> <div class="symbol"> <a href="#ceil">tf.ceil</a> </div> <div class="symbol"> <a href="#clipByValue">tf.clipByValue</a> </div> <div class="symbol"> <a href="#cos">tf.cos</a> </div> <div class="symbol"> <a href="#cosh">tf.cosh</a> </div> <div class="symbol"> <a href="#elu">tf.elu</a> </div> <div class="symbol"> <a href="#erf">tf.erf</a> </div> <div class="symbol"> <a href="#exp">tf.exp</a> </div> <div class="symbol"> <a href="#expm1">tf.expm1</a> </div> <div class="symbol"> <a href="#floor">tf.floor</a> </div> <div class="symbol"> <a href="#isFinite">tf.isFinite</a> </div> <div class="symbol"> <a href="#isInf">tf.isInf</a> </div> <div class="symbol"> <a href="#isNaN">tf.isNaN</a> </div> <div class="symbol"> <a href="#leakyRelu">tf.leakyRelu</a> </div> <div class="symbol"> <a href="#log">tf.log</a> </div> <div class="symbol"> <a href="#log1p">tf.log1p</a> </div> <div class="symbol"> <a href="#logSigmoid">tf.logSigmoid</a> </div> <div class="symbol"> <a href="#neg">tf.neg</a> </div> <div class="symbol"> <a href="#prelu">tf.prelu</a> </div> <div class="symbol"> <a href="#reciprocal">tf.reciprocal</a> </div> <div class="symbol"> <a 
href="#relu">tf.relu</a> </div> <div class="symbol"> <a href="#relu6">tf.relu6</a> </div> <div class="symbol"> <a href="#round">tf.round</a> </div> <div class="symbol"> <a href="#rsqrt">tf.rsqrt</a> </div> <div class="symbol"> <a href="#selu">tf.selu</a> </div> <div class="symbol"> <a href="#sigmoid">tf.sigmoid</a> </div> <div class="symbol"> <a href="#sign">tf.sign</a> </div> <div class="symbol"> <a href="#sin">tf.sin</a> </div> <div class="symbol"> <a href="#sinh">tf.sinh</a> </div> <div class="symbol"> <a href="#softplus">tf.softplus</a> </div> <div class="symbol"> <a href="#sqrt">tf.sqrt</a> </div> <div class="symbol"> <a href="#square">tf.square</a> </div> <div class="symbol"> <a href="#step">tf.step</a> </div> <div class="symbol"> <a href="#tan">tf.tan</a> </div> <div class="symbol"> <a href="#tanh">tf.tanh</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Matrices">Matrices</a> </div> <div class="symbol"> <a href="#dot">tf.dot</a> </div> <div class="symbol"> <a href="#euclideanNorm">tf.euclideanNorm</a> </div> <div class="symbol"> <a href="#matMul">tf.matMul</a> </div> <div class="symbol"> <a href="#norm">tf.norm</a> </div> <div class="symbol"> <a href="#outerProduct">tf.outerProduct</a> </div> <div class="symbol"> <a href="#transpose">tf.transpose</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Convolution">Convolution</a> </div> <div class="symbol"> <a href="#avgPool">tf.avgPool</a> </div> <div class="symbol"> <a href="#avgPool3d">tf.avgPool3d</a> </div> <div class="symbol"> <a href="#conv1d">tf.conv1d</a> </div> <div class="symbol"> <a href="#conv2d">tf.conv2d</a> </div> <div class="symbol"> <a href="#conv2dTranspose">tf.conv2dTranspose</a> </div> <div class="symbol"> <a href="#conv3d">tf.conv3d</a> </div> <div class="symbol"> <a href="#conv3dTranspose">tf.conv3dTranspose</a> </div> <div class="symbol"> <a href="#depthwiseConv2d">tf.depthwiseConv2d</a> </div> <div class="symbol"> <a href="#dilation2d">tf.dilation2d</a> </div> <div class="symbol"> <a href="#maxPool3d">tf.maxPool3d</a> </div> <div class="symbol"> <a href="#maxPoolWithArgmax">tf.maxPoolWithArgmax</a> </div> <div class="symbol"> <a href="#pool">tf.pool</a> </div> <div class="symbol"> <a href="#separableConv2d">tf.separableConv2d</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Reduction">Reduction</a> </div> <div class="symbol"> <a href="#all">tf.all</a> </div> <div class="symbol"> <a href="#any">tf.any</a> </div> <div class="symbol"> <a href="#argMax">tf.argMax</a> </div> <div class="symbol"> <a href="#argMin">tf.argMin</a> </div> <div class="symbol"> <a href="#bincount">tf.bincount</a> </div> <div class="symbol"> <a href="#denseBincount">tf.denseBincount</a> </div> <div class="symbol"> <a href="#logSumExp">tf.logSumExp</a> </div> <div class="symbol"> <a href="#max">tf.max</a> </div> <div class="symbol"> <a href="#mean">tf.mean</a> </div> <div class="symbol"> <a href="#min">tf.min</a> </div> <div class="symbol"> <a href="#prod">tf.prod</a> </div> <div class="symbol"> <a href="#sum">tf.sum</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Normalization">Normalization</a> </div> <div class="symbol"> <a href="#batchNorm">tf.batchNorm</a> </div> <div class="symbol"> <a href="#localResponseNormalization">tf.localResponseNormalization</a> </div> <div class="symbol"> <a 
href="#logSoftmax">tf.logSoftmax</a> </div> <div class="symbol"> <a href="#moments">tf.moments</a> </div> <div class="symbol"> <a href="#softmax">tf.softmax</a> </div> <div class="symbol"> <a href="#sparseToDense">tf.sparseToDense</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Images">Images</a> </div> <div class="symbol"> <a href="#image.cropAndResize">tf.image.cropAndResize</a> </div> <div class="symbol"> <a href="#image.flipLeftRight">tf.image.flipLeftRight</a> </div> <div class="symbol"> <a href="#image.grayscaleToRGB">tf.image.grayscaleToRGB</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppression">tf.image.nonMaxSuppression</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppressionAsync">tf.image.nonMaxSuppressionAsync</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppressionPadded">tf.image.nonMaxSuppressionPadded</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppressionPaddedAsync">tf.image.nonMaxSuppressionPaddedAsync</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppressionWithScore">tf.image.nonMaxSuppressionWithScore</a> </div> <div class="symbol"> <a href="#image.nonMaxSuppressionWithScoreAsync">tf.image.nonMaxSuppressionWithScoreAsync</a> </div> <div class="symbol"> <a href="#image.resizeBilinear">tf.image.resizeBilinear</a> </div> <div class="symbol"> <a href="#image.resizeNearestNeighbor">tf.image.resizeNearestNeighbor</a> </div> <div class="symbol"> <a href="#image.rgbToGrayscale">tf.image.rgbToGrayscale</a> </div> <div class="symbol"> <a href="#image.rotateWithOffset">tf.image.rotateWithOffset</a> </div> <div class="symbol"> <a href="#image.transform">tf.image.transform</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-RNN">RNN</a> </div> <div class="symbol"> <a href="#basicLSTMCell">tf.basicLSTMCell</a> </div> <div class="symbol"> <a href="#multiRNNCell">tf.multiRNNCell</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Logical">Logical</a> </div> <div class="symbol"> <a href="#bitwiseAnd">tf.bitwiseAnd</a> </div> <div class="symbol"> <a href="#equal">tf.equal</a> </div> <div class="symbol"> <a href="#greater">tf.greater</a> </div> <div class="symbol"> <a href="#greaterEqual">tf.greaterEqual</a> </div> <div class="symbol"> <a href="#less">tf.less</a> </div> <div class="symbol"> <a href="#lessEqual">tf.lessEqual</a> </div> <div class="symbol"> <a href="#logicalAnd">tf.logicalAnd</a> </div> <div class="symbol"> <a href="#logicalNot">tf.logicalNot</a> </div> <div class="symbol"> <a href="#logicalOr">tf.logicalOr</a> </div> <div class="symbol"> <a href="#logicalXor">tf.logicalXor</a> </div> <div class="symbol"> <a href="#notEqual">tf.notEqual</a> </div> <div class="symbol"> <a href="#where">tf.where</a> </div> <div class="symbol"> <a href="#whereAsync">tf.whereAsync</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Scan">Scan</a> </div> <div class="symbol"> <a href="#cumprod">tf.cumprod</a> </div> <div class="symbol"> <a href="#cumsum">tf.cumsum</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Evaluation">Evaluation</a> </div> <div class="symbol"> <a href="#confusionMatrix">tf.confusionMatrix</a> </div> <div class="symbol"> <a href="#inTopKAsync">tf.inTopKAsync</a> </div> <div class="symbol"> <a href="#lowerBound">tf.lowerBound</a> </div> <div 
class="symbol"> <a href="#searchSorted">tf.searchSorted</a> </div> <div class="symbol"> <a href="#topk">tf.topk</a> </div> <div class="symbol"> <a href="#unique">tf.unique</a> </div> <div class="symbol"> <a href="#upperBound">tf.upperBound</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Slicing and Joining">Slicing and Joining</a> </div> <div class="symbol"> <a href="#gatherND">tf.gatherND</a> </div> <div class="symbol"> <a href="#meshgrid">tf.meshgrid</a> </div> <div class="symbol"> <a href="#scatterND">tf.scatterND</a> </div> <div class="symbol"> <a href="#stridedSlice">tf.stridedSlice</a> </div> <div class="symbol"> <a href="#tensorScatterUpdate">tf.tensorScatterUpdate</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Ragged">Ragged</a> </div> <div class="symbol"> <a href="#raggedTensorToTensor">tf.raggedTensorToTensor</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Spectral">Spectral</a> </div> <div class="symbol"> <a href="#spectral.fft">tf.spectral.fft</a> </div> <div class="symbol"> <a href="#spectral.ifft">tf.spectral.ifft</a> </div> <div class="symbol"> <a href="#spectral.irfft">tf.spectral.irfft</a> </div> <div class="symbol"> <a href="#spectral.rfft">tf.spectral.rfft</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Segment">Segment</a> </div> <div class="symbol"> <a href="#unsortedSegmentSum">tf.unsortedSegmentSum</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Moving Average">Moving Average</a> </div> <div class="symbol"> <a href="#movingAverage">tf.movingAverage</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Dropout">Dropout</a> </div> <div class="symbol"> <a href="#dropout">tf.dropout</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Signal">Signal</a> </div> <div class="symbol"> <a href="#signal.frame">tf.signal.frame</a> </div> <div class="symbol"> <a href="#signal.hammingWindow">tf.signal.hammingWindow</a> </div> <div class="symbol"> <a href="#signal.hannWindow">tf.signal.hannWindow</a> </div> <div class="symbol"> <a href="#signal.stft">tf.signal.stft</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Linear Algebra">Linear Algebra</a> </div> <div class="symbol"> <a href="#linalg.bandPart">tf.linalg.bandPart</a> </div> <div class="symbol"> <a href="#linalg.gramSchmidt">tf.linalg.gramSchmidt</a> </div> <div class="symbol"> <a href="#linalg.qr">tf.linalg.qr</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-Sparse">Sparse</a> </div> <div class="symbol"> <a href="#sparseFillEmptyRows">tf.sparseFillEmptyRows</a> </div> <div class="symbol"> <a href="#sparseReshape">tf.sparseReshape</a> </div> <div class="symbol"> <a href="#sparseSegmentMean">tf.sparseSegmentMean</a> </div> <div class="symbol"> <a href="#sparseSegmentSum">tf.sparseSegmentSum</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Operations-String">String</a> </div> <div class="symbol"> <a href="#staticRegexReplace">tf.staticRegexReplace</a> </div> <div class="symbol"> <a href="#stringNGrams">tf.stringNGrams</a> </div> <div class="symbol"> <a 
href="#stringSplit">tf.stringSplit</a> </div> <div class="symbol"> <a href="#stringToHashBucketFast">tf.stringToHashBucketFast</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Training">Training</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Training-Gradients">Gradients</a> </div> <div class="symbol"> <a href="#grad">tf.grad</a> </div> <div class="symbol"> <a href="#grads">tf.grads</a> </div> <div class="symbol"> <a href="#customGrad">tf.customGrad</a> </div> <div class="symbol"> <a href="#valueAndGrad">tf.valueAndGrad</a> </div> <div class="symbol"> <a href="#valueAndGrads">tf.valueAndGrads</a> </div> <div class="symbol"> <a href="#variableGrads">tf.variableGrads</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Training-Optimizers">Optimizers</a> </div> <div class="symbol"> <a href="#train.sgd">tf.train.sgd</a> </div> <div class="symbol"> <a href="#train.momentum">tf.train.momentum</a> </div> <div class="symbol"> <a href="#train.adagrad">tf.train.adagrad</a> </div> <div class="symbol"> <a href="#train.adadelta">tf.train.adadelta</a> </div> <div class="symbol"> <a href="#train.adam">tf.train.adam</a> </div> <div class="symbol"> <a href="#train.adamax">tf.train.adamax</a> </div> <div class="symbol"> <a href="#train.rmsprop">tf.train.rmsprop</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Training-Losses">Losses</a> </div> <div class="symbol"> <a href="#losses.absoluteDifference">tf.losses.absoluteDifference</a> </div> <div class="symbol"> <a href="#losses.computeWeightedLoss">tf.losses.computeWeightedLoss</a> </div> <div class="symbol"> <a href="#losses.cosineDistance">tf.losses.cosineDistance</a> </div> <div class="symbol"> <a href="#losses.hingeLoss">tf.losses.hingeLoss</a> </div> <div class="symbol"> <a href="#losses.huberLoss">tf.losses.huberLoss</a> </div> <div class="symbol"> <a href="#losses.logLoss">tf.losses.logLoss</a> </div> <div class="symbol"> <a href="#losses.meanSquaredError">tf.losses.meanSquaredError</a> </div> <div class="symbol"> <a href="#losses.sigmoidCrossEntropy">tf.losses.sigmoidCrossEntropy</a> </div> <div class="symbol"> <a href="#losses.softmaxCrossEntropy">tf.losses.softmaxCrossEntropy</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Training-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:train.Optimizer">tf.train.Optimizer</a> <div class="method-list"> <div class="method"> <a href="#tf.train.Optimizer.minimize">.minimize</a> </div> <div class="method"> <a href="#tf.train.Optimizer.computeGradients">.computeGradients</a> </div> <div class="method"> <a href="#tf.train.Optimizer.applyGradients">.applyGradients</a> </div> </div> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Performance">Performance</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Performance-Memory">Memory</a> </div> <div class="symbol"> <a href="#tidy">tf.tidy</a> </div> <div class="symbol"> <a href="#dispose">tf.dispose</a> </div> <div class="symbol"> <a href="#keep">tf.keep</a> </div> <div class="symbol"> <a href="#memory">tf.memory</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Performance-Timing">Timing</a> </div> <div class="symbol"> <a href="#time">tf.time</a> </div> <div class="symbol"> <a 
href="#nextFrame">tf.nextFrame</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Performance-Profile">Profile</a> </div> <div class="symbol"> <a href="#profile">tf.profile</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Environment">Environment</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Environment-"></a> </div> <div class="symbol"> <a href="#class:Environment">tf.Environment</a> <div class="method-list"> </div> </div> <div class="symbol"> <a href="#disposeVariables">tf.disposeVariables</a> </div> <div class="symbol"> <a href="#enableDebugMode">tf.enableDebugMode</a> </div> <div class="symbol"> <a href="#enableProdMode">tf.enableProdMode</a> </div> <div class="symbol"> <a href="#engine">tf.engine</a> </div> <div class="symbol"> <a href="#env">tf.env</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Constraints">Constraints</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Constraints-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:constraints.Constraint">tf.constraints.Constraint</a> <div class="method-list"> </div> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Constraints-"></a> </div> <div class="symbol"> <a href="#constraints.maxNorm">tf.constraints.maxNorm</a> </div> <div class="symbol"> <a href="#constraints.minMaxNorm">tf.constraints.minMaxNorm</a> </div> <div class="symbol"> <a href="#constraints.nonNeg">tf.constraints.nonNeg</a> </div> <div class="symbol"> <a href="#constraints.unitNorm">tf.constraints.unitNorm</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Initializers">Initializers</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Initializers-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:initializers.Initializer">tf.initializers.Initializer</a> <div class="method-list"> </div> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Initializers-"></a> </div> <div class="symbol"> <a href="#initializers.constant">tf.initializers.constant</a> </div> <div class="symbol"> <a href="#initializers.glorotNormal">tf.initializers.glorotNormal</a> </div> <div class="symbol"> <a href="#initializers.glorotUniform">tf.initializers.glorotUniform</a> </div> <div class="symbol"> <a href="#initializers.heNormal">tf.initializers.heNormal</a> </div> <div class="symbol"> <a href="#initializers.heUniform">tf.initializers.heUniform</a> </div> <div class="symbol"> <a href="#initializers.identity">tf.initializers.identity</a> </div> <div class="symbol"> <a href="#initializers.leCunNormal">tf.initializers.leCunNormal</a> </div> <div class="symbol"> <a href="#initializers.leCunUniform">tf.initializers.leCunUniform</a> </div> <div class="symbol"> <a href="#initializers.ones">tf.initializers.ones</a> </div> <div class="symbol"> <a href="#initializers.orthogonal">tf.initializers.orthogonal</a> </div> <div class="symbol"> <a href="#initializers.randomNormal">tf.initializers.randomNormal</a> </div> <div class="symbol"> <a href="#initializers.randomUniform">tf.initializers.randomUniform</a> </div> <div class="symbol"> <a href="#initializers.truncatedNormal">tf.initializers.truncatedNormal</a> </div> <div class="symbol"> <a 
href="#initializers.varianceScaling">tf.initializers.varianceScaling</a> </div> <div class="symbol"> <a href="#initializers.zeros">tf.initializers.zeros</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Regularizers">Regularizers</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Regularizers-"></a> </div> <div class="symbol"> <a href="#regularizers.l1">tf.regularizers.l1</a> </div> <div class="symbol"> <a href="#regularizers.l1l2">tf.regularizers.l1l2</a> </div> <div class="symbol"> <a href="#regularizers.l2">tf.regularizers.l2</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Data">Data</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Data-Creation">Creation</a> </div> <div class="symbol"> <a href="#data.array">tf.data.array</a> </div> <div class="symbol"> <a href="#data.csv">tf.data.csv</a> </div> <div class="symbol"> <a href="#data.generator">tf.data.generator</a> </div> <div class="symbol"> <a href="#data.microphone">tf.data.microphone</a> </div> <div class="symbol"> <a href="#data.webcam">tf.data.webcam</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Data-Operations">Operations</a> </div> <div class="symbol"> <a href="#data.zip">tf.data.zip</a> </div> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Data-Classes">Classes</a> </div> <div class="symbol"> <a href="#class:data.CSVDataset">tf.data.CSVDataset</a> <div class="method-list"> <div class="method"> <a href="#tf.data.CSVDataset.columnNames">.columnNames</a> </div> </div> </div> <div class="symbol"> <a href="#class:data.Dataset">tf.data.Dataset</a> <div class="method-list"> <div class="method"> <a href="#tf.data.Dataset.batch">.batch</a> </div> <div class="method"> <a href="#tf.data.Dataset.concatenate">.concatenate</a> </div> <div class="method"> <a href="#tf.data.Dataset.filter">.filter</a> </div> <div class="method"> <a href="#tf.data.Dataset.forEachAsync">.forEachAsync</a> </div> <div class="method"> <a href="#tf.data.Dataset.map">.map</a> </div> <div class="method"> <a href="#tf.data.Dataset.mapAsync">.mapAsync</a> </div> <div class="method"> <a href="#tf.data.Dataset.prefetch">.prefetch</a> </div> <div class="method"> <a href="#tf.data.Dataset.repeat">.repeat</a> </div> <div class="method"> <a href="#tf.data.Dataset.skip">.skip</a> </div> <div class="method"> <a href="#tf.data.Dataset.shuffle">.shuffle</a> </div> <div class="method"> <a href="#tf.data.Dataset.take">.take</a> </div> <div class="method"> <a href="#tf.data.Dataset.toArray">.toArray</a> </div> </div> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Visualization">Visualization</a> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Util">Util</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Util-"></a> </div> <div class="symbol"> <a href="#util.assert">tf.util.assert</a> </div> <div class="symbol"> <a href="#util.createShuffledIndices">tf.util.createShuffledIndices</a> </div> <div class="symbol"> <a href="#decodeString">tf.decodeString</a> </div> <div class="symbol"> <a href="#encodeString">tf.encodeString</a> </div> <div class="symbol"> <a href="#fetch">tf.fetch</a> </div> <div class="symbol"> <a href="#util.flatten">tf.util.flatten</a> </div> <div class="symbol"> <a 
href="#util.now">tf.util.now</a> </div> <div class="symbol"> <a href="#util.shuffle">tf.util.shuffle</a> </div> <div class="symbol"> <a href="#util.shuffleCombo">tf.util.shuffleCombo</a> </div> <div class="symbol"> <a href="#util.sizeFromShape">tf.util.sizeFromShape</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Backends">Backends</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Backends-"></a> </div> <div class="symbol"> <a href="#backend">tf.backend</a> </div> <div class="symbol"> <a href="#getBackend">tf.getBackend</a> </div> <div class="symbol"> <a href="#ready">tf.ready</a> </div> <div class="symbol"> <a href="#registerBackend">tf.registerBackend</a> </div> <div class="symbol"> <a href="#removeBackend">tf.removeBackend</a> </div> <div class="symbol"> <a href="#setBackend">tf.setBackend</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Browser">Browser</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Browser-"></a> </div> <div class="symbol"> <a href="#browser.draw">tf.browser.draw</a> </div> <div class="symbol"> <a href="#browser.fromPixels">tf.browser.fromPixels</a> </div> <div class="symbol"> <a href="#browser.fromPixelsAsync">tf.browser.fromPixelsAsync</a> </div> <div class="symbol"> <a href="#browser.toPixels">tf.browser.toPixels</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Metrics">Metrics</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Metrics-"></a> </div> <div class="symbol"> <a href="#metrics.binaryAccuracy">tf.metrics.binaryAccuracy</a> </div> <div class="symbol"> <a href="#metrics.binaryCrossentropy">tf.metrics.binaryCrossentropy</a> </div> <div class="symbol"> <a href="#metrics.categoricalAccuracy">tf.metrics.categoricalAccuracy</a> </div> <div class="symbol"> <a href="#metrics.categoricalCrossentropy">tf.metrics.categoricalCrossentropy</a> </div> <div class="symbol"> <a href="#metrics.cosineProximity">tf.metrics.cosineProximity</a> </div> <div class="symbol"> <a href="#metrics.meanAbsoluteError">tf.metrics.meanAbsoluteError</a> </div> <div class="symbol"> <a href="#metrics.meanAbsolutePercentageError">tf.metrics.meanAbsolutePercentageError</a> </div> <div class="symbol"> <a href="#metrics.meanSquaredError">tf.metrics.meanSquaredError</a> </div> <div class="symbol"> <a href="#metrics.precision">tf.metrics.precision</a> </div> <div class="symbol"> <a href="#metrics.r2Score">tf.metrics.r2Score</a> </div> <div class="symbol"> <a href="#metrics.recall">tf.metrics.recall</a> </div> <div class="symbol"> <a href="#metrics.sparseCategoricalAccuracy">tf.metrics.sparseCategoricalAccuracy</a> </div> </div> </div> <div class="heading"> <div class="symbol"> <a class="heading-name" href="#Callbacks">Callbacks</a> </div> <div class="subheading"> <div class="symbol"> <a class="subheading-name" href="#Callbacks-"></a> </div> <div class="symbol"> <a href="#callbacks.earlyStopping">tf.callbacks.earlyStopping</a> </div> </div> </div> </div> </div> <div class="reference"> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Tensors" href="#Tensors" class="symbol-link">Tensors</a> </div> <div class="description"> <p>Tensors are the core datastructure of TensorFlow.js They are a generalization of vectors and matrices to potentially higher dimensions.</p> </div> </div> <div class="subheading"> 
<div class="title"> <a name="Tensors-Creation" href="#Tensors-Creation" class="symbol-link"> Tensors / Creation </a> </div> <div class="description"> <p>We have utility functions for common cases like Scalar, 1D, 2D, 3D and 4D tensors, as well a number of functions to initialize tensors in ways useful for machine learning.</p> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor" href="#tensor"> tf.tensor</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor.ts#L204-L209" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass an array of values to create a vector.</span> tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array of values to make a matrix or a higher</span> <span class="hljs-comment">// dimensional tensor.</span> tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape yourself.</span> tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a `WebGLData` object and specify a shape yourself.</span> <span class="hljs-comment">// This makes it possible for TF.js applications to avoid GPU / CPU sync.</span> <span class="hljs-comment">// For example, if your application includes a preprocessing step on the GPU,</span> <span class="hljs-comment">// you could upload the GPU output directly to TF.js, rather than first</span> <span class="hljs-comment">// downloading the values.</span> <span class="hljs-comment">// Example for WebGL2:</span> <span class="hljs-keyword">if</span> (tf.<span class="hljs-title function_">findBackend</span>(<span class="hljs-string">&#x27;custom-webgl&#x27;</span>) == <span class="hljs-literal">null</span>) { <span class="hljs-keyword">const</span> customCanvas = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">createElement</span>(<span class="hljs-string">&#x27;canvas&#x27;</span>); <span class="hljs-keyword">const</span> customBackend = <span class="hljs-keyword">new</span> tf.<span class="hljs-title class_">MathBackendWebGL</span>(customCanvas); tf.<span class="hljs-title function_">registerBackend</span>(<span class="hljs-string">&#x27;custom-webgl&#x27;</span>, <span class="hljs-function">() =&gt;</span> customBackend); } <span 
class="hljs-keyword">const</span> savedBackend = tf.<span class="hljs-title function_">getBackend</span>(); <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">setBackend</span>(<span class="hljs-string">&#x27;custom-webgl&#x27;</span>); <span class="hljs-keyword">const</span> gl = tf.<span class="hljs-title function_">backend</span>().<span class="hljs-property">gpgpu</span>.<span class="hljs-property">gl</span>; <span class="hljs-keyword">const</span> texture = gl.<span class="hljs-title function_">createTexture</span>(); <span class="hljs-keyword">const</span> tex2d = gl.<span class="hljs-property">TEXTURE_2D</span>; <span class="hljs-keyword">const</span> width = <span class="hljs-number">2</span>; <span class="hljs-keyword">const</span> height = <span class="hljs-number">2</span>; gl.<span class="hljs-title function_">bindTexture</span>(tex2d, texture); gl.<span class="hljs-title function_">texParameteri</span>(tex2d, gl.<span class="hljs-property">TEXTURE_WRAP_S</span>, gl.<span class="hljs-property">CLAMP_TO_EDGE</span>); gl.<span class="hljs-title function_">texParameteri</span>(tex2d, gl.<span class="hljs-property">TEXTURE_WRAP_T</span>, gl.<span class="hljs-property">CLAMP_TO_EDGE</span>); gl.<span class="hljs-title function_">texParameteri</span>(tex2d, gl.<span class="hljs-property">TEXTURE_MIN_FILTER</span>, gl.<span class="hljs-property">NEAREST</span>); gl.<span class="hljs-title function_">texParameteri</span>(tex2d, gl.<span class="hljs-property">TEXTURE_MAG_FILTER</span>, gl.<span class="hljs-property">NEAREST</span>); gl.<span class="hljs-title function_">texImage2D</span>( tex2d, <span class="hljs-number">0</span>, gl.<span class="hljs-property">RGBA32F</span>, <span class="hljs-comment">// internalFormat</span> width, height, <span class="hljs-number">0</span>, gl.<span class="hljs-property">RGBA</span>, <span class="hljs-comment">// textureFormat</span> gl.<span class="hljs-property">FLOAT</span>, <span class="hljs-comment">// textureType</span> <span class="hljs-keyword">new</span> <span class="hljs-title class_">Float32Array</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>]) ); <span class="hljs-comment">// Currently, the `texture` has 4 pixels:</span> <span class="hljs-comment">// Pixel0 is {R:0, G:1, B:2, A:3}</span> <span class="hljs-comment">// Pixel1 is {R:4, G:5, B:6, A:7}</span> <span class="hljs-comment">// Pixel2 is {R:8, G:9, B:10, A:11}</span> <span class="hljs-comment">// Pixel3 is {R:12, G:13, B:14, A:15}</span> <span class="hljs-keyword">const</span> logicalShape = [height * width * <span class="hljs-number">2</span>]; <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor</span>({texture, height, width, <span class="hljs-attr">channels</span>: <span class="hljs-string">&#x27;BR&#x27;</span>}, logicalShape); a.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// Tensor value will be [2, 0, 6, 4, 10, 8, 14, 12], since [2, 0] is 
the</span> <span class="hljs-comment">// values of &#x27;B&#x27; and &#x27;R&#x27; channels of Pixel0, [6, 4] is the values of &#x27;B&#x27; and</span> <span class="hljs-string">&#x27;R&#x27;</span> <span class="hljs-comment">// channels of Pixel1...</span> <span class="hljs-comment">// For postprocessing on the GPU, it&#x27;s possible to retrieve the texture</span> <span class="hljs-comment">// backing any tensor by calling the tensor&#x27;s `dataToGPU` method like</span> <span class="hljs-comment">// so:</span> <span class="hljs-keyword">const</span> tex = a.<span class="hljs-title function_">dataToGPU</span>(); <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">setBackend</span>(savedBackend); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a `WebGPUData` object and specify a shape yourself.</span> <span class="hljs-comment">// This makes it possible for TF.js applications to avoid GPU / CPU sync.</span> <span class="hljs-comment">// For example, if your application includes a preprocessing step on the GPU,</span> <span class="hljs-comment">// you could upload the GPU output directly to TF.js, rather than first</span> <span class="hljs-comment">// downloading the values. Unlike WebGL, this optionally supports zero copy</span> <span class="hljs-comment">// by WebGPUData.zeroCopy. When zeroCopy is false or undefined(default), this</span> <span class="hljs-comment">// passing GPUBuffer can be destroyed after tensor is created. When zeroCopy</span> <span class="hljs-comment">// is true, this GPUBuffer is bound directly by the tensor, so do not destroy</span> <span class="hljs-comment">// this GPUBuffer until all access is done.</span> <span class="hljs-comment">// Example for WebGPU:</span> <span class="hljs-keyword">function</span> <span class="hljs-title function_">createGPUBufferFromData</span>(<span class="hljs-params">device, data, dtype</span>) { <span class="hljs-keyword">const</span> bytesPerElement = <span class="hljs-number">4</span>; <span class="hljs-keyword">const</span> sizeInBytes = data.<span class="hljs-property">length</span> * bytesPerElement; <span class="hljs-keyword">const</span> gpuWriteBuffer = device.<span class="hljs-title function_">createBuffer</span>({ <span class="hljs-attr">mappedAtCreation</span>: <span class="hljs-literal">true</span>, <span class="hljs-attr">size</span>: sizeInBytes, <span class="hljs-attr">usage</span>: <span class="hljs-title class_">GPUBufferUsage</span>.<span class="hljs-property">MAP_WRITE</span> | <span class="hljs-title class_">GPUBufferUsage</span>.<span class="hljs-property">COPY_SRC</span> }); <span class="hljs-keyword">const</span> arrayBuffer = gpuWriteBuffer.<span class="hljs-title function_">getMappedRange</span>(); <span class="hljs-keyword">if</span> (dtype === <span class="hljs-string">&#x27;float32&#x27;</span>) { <span class="hljs-keyword">new</span> <span class="hljs-title class_">Float32Array</span>(arrayBuffer).<span class="hljs-title function_">set</span>(data); } <span class="hljs-keyword">else</span> <span class="hljs-keyword">if</span> (dtype === <span class="hljs-string">&#x27;int32&#x27;</span>) { <span class="hljs-keyword">new</span> <span class="hljs-title class_">Int32Array</span>(arrayBuffer).<span class="hljs-title function_">set</span>(data); } <span class="hljs-keyword">else</span> { <span class="hljs-keyword">throw</span> <span class="hljs-keyword">new</span> <span class="hljs-title class_">Error</span>( <span 
class="hljs-string">`Creating tensor from GPUBuffer only supports`</span> + <span class="hljs-string">`&#x27;float32&#x27;|&#x27;int32&#x27; dtype, while the dtype is <span class="hljs-subst">${dtype}</span>.`</span>); } gpuWriteBuffer.<span class="hljs-title function_">unmap</span>(); <span class="hljs-keyword">const</span> gpuReadBuffer = device.<span class="hljs-title function_">createBuffer</span>({ <span class="hljs-attr">mappedAtCreation</span>: <span class="hljs-literal">false</span>, <span class="hljs-attr">size</span>: sizeInBytes, <span class="hljs-attr">usage</span>: <span class="hljs-title class_">GPUBufferUsage</span>.<span class="hljs-property">COPY_DST</span> | <span class="hljs-title class_">GPUBufferUsage</span>.<span class="hljs-property">STORAGE</span> | <span class="hljs-title class_">GPUBufferUsage</span>.<span class="hljs-property">COPY_SRC</span> }); <span class="hljs-keyword">const</span> copyEncoder = device.<span class="hljs-title function_">createCommandEncoder</span>(); copyEncoder.<span class="hljs-title function_">copyBufferToBuffer</span>( gpuWriteBuffer, <span class="hljs-number">0</span>, gpuReadBuffer, <span class="hljs-number">0</span>, sizeInBytes); <span class="hljs-keyword">const</span> copyCommands = copyEncoder.<span class="hljs-title function_">finish</span>(); device.<span class="hljs-property">queue</span>.<span class="hljs-title function_">submit</span>([copyCommands]); gpuWriteBuffer.<span class="hljs-title function_">destroy</span>(); <span class="hljs-keyword">return</span> gpuReadBuffer; } <span class="hljs-keyword">const</span> savedBackend = tf.<span class="hljs-title function_">getBackend</span>(); <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">setBackend</span>(<span class="hljs-string">&#x27;webgpu&#x27;</span>).<span class="hljs-title function_">catch</span>( <span class="hljs-function">() =&gt;</span> {<span class="hljs-keyword">throw</span> <span class="hljs-keyword">new</span> <span class="hljs-title class_">Error</span>( <span class="hljs-string">&#x27;Failed to use WebGPU backend. 
Please use Chrome Canary to run.&#x27;</span>)}); <span class="hljs-keyword">const</span> dtype = <span class="hljs-string">&#x27;float32&#x27;</span>; <span class="hljs-keyword">const</span> device = tf.<span class="hljs-title function_">backend</span>().<span class="hljs-property">device</span>; <span class="hljs-keyword">const</span> aData = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>, <span class="hljs-number">16</span>]; <span class="hljs-keyword">const</span> bData = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]; <span class="hljs-keyword">const</span> expected = [<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>, <span class="hljs-number">10</span>, <span class="hljs-number">12</span>, <span class="hljs-number">10</span>, <span class="hljs-number">12</span>, <span class="hljs-number">14</span>, <span class="hljs-number">16</span>, <span class="hljs-number">14</span>, <span class="hljs-number">16</span>, <span class="hljs-number">18</span>, <span class="hljs-number">20</span>]; <span class="hljs-keyword">const</span> aBuffer = <span class="hljs-title function_">createGPUBufferFromData</span>(device, aData, dtype); <span class="hljs-keyword">const</span> shape = [aData.<span class="hljs-property">length</span>]; <span class="hljs-comment">// To use zeroCopy, use {buffer: aBuffer, zeroCopy: true} instead and do not</span> <span class="hljs-comment">// destroy aBuffer until all access is done.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor</span>({<span class="hljs-attr">buffer</span>: aBuffer}, shape, dtype); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor</span>(bData, shape, dtype); <span class="hljs-keyword">const</span> result = tf.<span class="hljs-title function_">add</span>(a, b); result.<span class="hljs-title function_">print</span>(); a.<span class="hljs-title function_">dispose</span>(); b.<span class="hljs-title function_">dispose</span>(); result.<span class="hljs-title function_">dispose</span>(); aBuffer.<span class="hljs-title function_">destroy</span>(); <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">setBackend</span>(savedBackend); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span
class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array|WebGLData|WebGPUData)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>(At the moment it supports Uint8Array, Uint8ClampedArray, Int32Array, Float32Array) data types, or a <code>WebGLData</code> object, or a <code>WebGPUData</code> object. If the values are strings, they will be encoded as utf-8 and kept as <code>Uint8Array[]</code>. If the values is a <code>WebGLData</code> object, the dtype could only be 'float32' or 'int32' and the object has to have: 1. texture, a <code>WebGLTexture</code>, the texture must share the same <code>WebGLRenderingContext</code> with TFJS's WebGL backend (you could create a custom WebGL backend from your texture's canvas) and the internal texture format for the input texture must be floating point or normalized integer; 2. height, the height of the texture; 3. width, the width of the texture; 4. channels, a non-empty subset of 'RGBA', indicating the values of which channels will be passed to the tensor, such as 'R' or 'BR' (The order of the channels affect the order of tensor values. ). (If the values passed from texture is less than the tensor size, zeros will be padded at the rear.). If the values is a <code>WebGPUData</code> object, the dtype could only be 'float32' or 'int32 and the object has to have: buffer, a <code>GPUBuffer</code>. The buffer must:</p> <ol> <li>share the same <code>GPUDevice</code> with TFJS's WebGPU backend; 2. buffer.usage should at least support GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC; 3. buffer.size should not be smaller than the byte size of tensor shape. WebGPUData optionally supports zero copy by flag zeroCopy. When zeroCopy is false or undefined(default),this passing GPUBuffer can be destroyed after tensor is created. When zeroCopy is true, this GPUBuffer is bound directly by the tensor, so do not destroy this GPUBuffer until all access is done.</li> </ol> </span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">The shape of the tensor. Optional. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="scalar" href="#scalar"> tf.scalar</a> <span class="signature">(value, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/scalar.ts#L38-L55" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-0 <a href="#class:Tensor">tf.Tensor</a> (scalar) with the provided value and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#scalar">tf.scalar()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">3.14</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">value</span> <span class="param-type">(number|boolean|string|Uint8Array)</span> <span class="param-docs">The value of the scalar.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Scalar</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor1d" href="#tensor1d"> tf.tensor1d</a> <span class="signature">(values, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor1d.ts#L41-L49" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-1 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor1d">tf.tensor1d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. 
Can be array of numbers, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor2d" href="#tensor2d"> tf.tensor2d</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor2d.ts#L48-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-2 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor2d">tf.tensor2d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array.</span> tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape.</span> tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">([number, number])</span> <span class="param-docs">The shape of the tensor. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor3d" href="#tensor3d"> tf.tensor3d</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor3d.ts#L48-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-3 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor3d">tf.tensor3d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array.</span> tf.<span class="hljs-title function_">tensor3d</span>([[[<span class="hljs-number">1</span>], [<span class="hljs-number">2</span>]], [[<span class="hljs-number">3</span>], [<span class="hljs-number">4</span>]]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape.</span> tf.<span class="hljs-title function_">tensor3d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">([number, number, number])</span> <span class="param-docs">The shape of the tensor. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor4d" href="#tensor4d"> tf.tensor4d</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor4d.ts#L48-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-4 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor4d">tf.tensor4d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array.</span> tf.<span class="hljs-title function_">tensor4d</span>([[[[<span class="hljs-number">1</span>], [<span class="hljs-number">2</span>]], [[<span class="hljs-number">3</span>], [<span class="hljs-number">4</span>]]]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape.</span> tf.<span class="hljs-title function_">tensor4d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">([number, number, number, number])</span> <span class="param-docs">The shape of the tensor. Optional. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor5d" href="#tensor5d"> tf.tensor5d</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor5d.ts#L48-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-5 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor5d">tf.tensor5d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array.</span> tf.<span class="hljs-title function_">tensor5d</span>([[[[[<span class="hljs-number">1</span>],[<span class="hljs-number">2</span>]],[[<span class="hljs-number">3</span>],[<span class="hljs-number">4</span>]]],[[[<span class="hljs-number">5</span>],[<span class="hljs-number">6</span>]],[[<span class="hljs-number">7</span>],[<span class="hljs-number">8</span>]]]]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape.</span> tf.<span class="hljs-title function_">tensor5d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">([number, number, number, number, number])</span> <span class="param-docs">The shape of the tensor. Optional. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor5D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensor6d" href="#tensor6d"> tf.tensor6d</a> <span class="signature">(values, shape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor6d.ts#L48-L70" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates rank-6 <a href="#class:Tensor">tf.Tensor</a> with the provided values, shape and dtype.</p> <p>The same functionality can be achieved with <a href="#tensor">tf.tensor()</a>, but in general we recommend using <a href="#tensor6d">tf.tensor6d()</a> as it makes the code more readable.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a nested array.</span> tf.<span class="hljs-title function_">tensor6d</span>([[[[[[<span class="hljs-number">1</span>],[<span class="hljs-number">2</span>]],[[<span class="hljs-number">3</span>],[<span class="hljs-number">4</span>]]],[[[<span class="hljs-number">5</span>],[<span class="hljs-number">6</span>]],[[<span class="hljs-number">7</span>],[<span class="hljs-number">8</span>]]]]]]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Pass a flat array and specify a shape.</span> tf.<span class="hljs-title function_">tensor6d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The values of the tensor. Can be nested array of numbers, or a flat array, or a <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">([number, number, number, number, number, number])</span> <span class="param-docs">The shape of the tensor. Optional. 
If not provided, it is inferred from <code>values</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor6D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="buffer" href="#buffer"> tf.buffer</a> <span class="signature">(shape, dtype?, values?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/buffer.ts#L48-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates an empty <a href="#class:TensorBuffer">tf.TensorBuffer</a> with the specified <code>shape</code> and <code>dtype</code>.</p> <p>The values are stored in CPU as <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>. Fill the buffer using <code>buffer.set()</code>, or by modifying directly <code>buffer.values</code>.</p> <p>When done, call <code>buffer.toTensor()</code> to get an immutable <a href="#class:Tensor">tf.Tensor</a> with those values.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Create a buffer and set values at particular indices.</span> <span class="hljs-keyword">const</span> buffer = tf.<span class="hljs-title function_">buffer</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); buffer.<span class="hljs-title function_">set</span>(<span class="hljs-number">3</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>); buffer.<span class="hljs-title function_">set</span>(<span class="hljs-number">5</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>); <span class="hljs-comment">// Convert the buffer back to a tensor.</span> buffer.<span class="hljs-title function_">toTensor</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32')</span> <span class="param-docs">The dtype of the buffer. Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(DataTypeMap['float32'])</span> <span class="param-docs">The values of the buffer as <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>. 
Defaults to zeros.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:TensorBuffer">tf.TensorBuffer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="clone" href="#clone"> tf.clone</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/clone.ts#L41-L48" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new tensor with the same values and shape as the specified tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">clone</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to clone.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="complex" href="#complex"> tf.complex</a> <span class="signature">(real, imag)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/complex.ts#L47-L57" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Converts two real numbers to a complex number.</p> <p>Given a tensor <code>real</code> representing the real part of a complex number, and a tensor <code>imag</code> representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form [r0, i0, r1, i1], where r represents the real part and i represents the imag part.</p> <p>The input tensors real and imag must have the same shape.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> real = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2.25</span>, <span class="hljs-number">3.25</span>]); <span class="hljs-keyword">const</span> imag = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">4.75</span>, <span class="hljs-number">5.75</span>]); <span class="hljs-keyword">const</span> complex = tf.<span class="hljs-title function_">complex</span>(real, imag); complex.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">real</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">imag</span> <span 
class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="diag" href="#diag"> tf.diag</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/diag.ts#L49-L55" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a diagonal tensor with given diagonal values.</p> <p>Given a diagonal, this operation returns a tensor with the diagonal and everything else padded with zeros.</p> <p>Assume the input has dimensions <code>[D1,..., Dk]</code>, then the output is a tensor of rank 2k with dimensions <code>[D1,..., Dk, D1,..., Dk]</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); tf.<span class="hljs-title function_">diag</span>(x).<span class="hljs-title function_">print</span>() </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">2</span>]) tf.<span class="hljs-title function_">diag</span>(x).<span class="hljs-title function_">print</span>() </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="eye" href="#eye"> tf.eye</a> <span class="signature">(numRows, numColumns?, batchShape?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/eye.ts#L41-L80" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create an identity matrix.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">numRows</span> <span class="param-type">(number)</span> <span class="param-docs">Number of rows.</span> </li> <li class="parameter"> <span class="param-name">numColumns</span> <span class="param-type">(number)</span> <span class="param-docs">Number of columns. 
Defaults to <code>numRows</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">batchShape</span> <span class="param-type">([ number ]|[number, number]|[number, number, number]|[number, number, number, number])</span> <span class="param-docs">If provided, will add the batch shape to the beginning of the shape of the returned <a href="#class:Tensor">tf.Tensor</a> by repeating the identity matrix.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Data type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="fill" href="#fill"> tf.fill</a> <span class="signature">(shape, value, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/fill.ts#L40-L48" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> filled with a scalar value.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">fill</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], <span class="hljs-number">4</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">value</span> <span class="param-type">(number|string)</span> <span class="param-docs">The scalar value to fill the tensor with.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The type of an element in the resulting tensor. Defaults to 'float32' if the given param value is a number, otherwise 'string'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="imag" href="#imag"> tf.imag</a> <span class="signature">(input)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/imag.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the imaginary part of a complex (or real) tensor.</p> <p>Given a tensor input, this operation returns a tensor of type float that is the imaginary part of each element in input considered as a complex number. 
If input is real, a tensor of all zeros is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">complex</span>([-<span class="hljs-number">2.25</span>, <span class="hljs-number">3.25</span>], [<span class="hljs-number">4.75</span>, <span class="hljs-number">5.75</span>]); tf.<span class="hljs-title function_">imag</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="linspace" href="#linspace"> tf.linspace</a> <span class="signature">(start, stop, num)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/linspace.ts#L35-L42" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Return an evenly spaced sequence of numbers over the given interval.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">linspace</span>(<span class="hljs-number">0</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">start</span> <span class="param-type">(number)</span> <span class="param-docs">The start value of the sequence.</span> </li> <li class="parameter"> <span class="param-name">stop</span> <span class="param-type">(number)</span> <span class="param-docs">The end value of the sequence.</span> </li> <li class="parameter"> <span class="param-name">num</span> <span class="param-type">(number)</span> <span class="param-docs">The number of values to generate.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="oneHot" href="#oneHot"> tf.oneHot</a> <span class="signature">(indices, depth, onValue?, offValue?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/one_hot.ts#L52-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a one-hot <a href="#class:Tensor">tf.Tensor</a>. The locations represented by <code>indices</code> take value <code>onValue</code> (defaults to 1), while all other locations take value <code>offValue</code> (defaults to 0). If <code>indices</code> is rank <code>R</code>, the output has rank <code>R+1</code> with the last axis of size <code>depth</code>. <code>indices</code> used to encode prediction class must start from 0. 
For example, if you have 3 classes of data, class 1 should be encoded as 0, class 2 should be 1, and class 3 should be 2.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">oneHot</span>(tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), <span class="hljs-number">3</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"><a href="#class:Tensor">tf.Tensor</a> of indices with dtype <code>int32</code>. Indices must start from 0.</span> </li> <li class="parameter"> <span class="param-name">depth</span> <span class="param-type">(number)</span> <span class="param-docs">The depth of the one hot dimension.</span> </li> <li class="parameter"> <span class="param-name">onValue</span> <span class="param-type">(number)</span> <span class="param-docs">A number used to fill in the output when the index matches the location.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">offValue</span> <span class="param-type">(number)</span> <span class="param-docs">A number used to fill in the output when the index does not match the location.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The dtype of the output tensor, default to 'int32'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="ones" href="#ones"> tf.ones</a> <span class="signature">(shape, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/ones.ts#L39-L49" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with all elements set to 1.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The type of an element in the resulting tensor. 
Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="onesLike" href="#onesLike"> tf.onesLike</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/ones_like.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with all elements set to 1 with the same shape as the given tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); tf.<span class="hljs-title function_">onesLike</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="print" href="#print"> tf.print</a> <span class="signature">(x, verbose?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/print.ts#L33-L35" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Prints information about the <a href="#class:Tensor">tf.Tensor</a> including its data.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> verbose = <span class="hljs-literal">true</span>; tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(verbose); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The tensor to be printed.</span> </li> <li class="parameter"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to print verbose information about the <code>Tensor</code>, including dtype and size.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="range" href="#range"> tf.range</a> <span class="signature">(start, stop, step?, dtype?)</span> <span class="chip">function</span> <span
class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/range.ts#L42-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new <a href="#class:Tensor">tf.Tensor1D</a> filled with the numbers in the range provided.</p> <p>The tensor is a half-open interval meaning it includes start, but excludes stop. Decrementing ranges and negative step values are also supported.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">range</span>(<span class="hljs-number">0</span>, <span class="hljs-number">9</span>, <span class="hljs-number">2</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">start</span> <span class="param-type">(number)</span> <span class="param-docs">An integer start value</span> </li> <li class="parameter"> <span class="param-name">stop</span> <span class="param-type">(number)</span> <span class="param-docs">An integer stop value</span> </li> <li class="parameter"> <span class="param-name">step</span> <span class="param-type">(number)</span> <span class="param-docs">An integer increment (will default to 1 or -1)</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32')</span> <span class="param-docs">The data type of the output tensor. Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="real" href="#real"> tf.real</a> <span class="signature">(input)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/real.ts#L41-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the real part of a complex (or real) tensor.</p> <p>Given a tensor input, this operation returns a tensor of type float that is the real part of each element in input considered as a complex number.</p> <p>If the input is real, it simply makes a clone.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">complex</span>([-<span class="hljs-number">2.25</span>, <span class="hljs-number">3.25</span>], [<span class="hljs-number">4.75</span>, <span class="hljs-number">5.75</span>]); tf.<span class="hljs-title function_">real</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="truncatedNormal" href="#truncatedNormal"> 
tf.truncatedNormal</a> <span class="signature">(shape, mean?, stdDev?, dtype?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/truncated_normal.ts#L46-L60" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a truncated normal distribution.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">truncatedNormal</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> <p>The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">mean</span> <span class="param-type">(number)</span> <span class="param-docs">The mean of the normal distribution.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">stdDev</span> <span class="param-type">(number)</span> <span class="param-docs">The standard deviation of the normal distribution.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32')</span> <span class="param-docs">The data type of the output tensor.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">The seed for the random number generator.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="variable" href="#variable"> tf.variable</a> <span class="signature">(initialValue, trainable?, name?, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/variable.ts#L38-L43" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new variable with the provided initial value.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">variable</span>(tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>])); x.<span class="hljs-title function_">assign</span>(tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>])); x.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">initialValue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span 
class="param-docs">Initial value for the tensor.</span> </li> <li class="parameter"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, optimizers are allowed to update it.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name of the variable. Defaults to a unique id.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">If set, initialValue will be converted to the given type.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Variable">tf.Variable</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="zeros" href="#zeros"> tf.zeros</a> <span class="signature">(shape, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/zeros.ts#L38-L48" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with all elements set to 0.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The type of an element in the resulting tensor. Can be 'float32', 'int32' or 'bool'. 
Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="zerosLike" href="#zerosLike"> tf.zerosLike</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/zeros_like.ts#L40-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with all elements set to 0 with the same shape as the given tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); tf.<span class="hljs-title function_">zerosLike</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor of required shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Tensors-Classes" href="#Tensors-Classes" class="symbol-link"> Tensors / Classes </a> </div> <div class="description"> <p> This section shows the main Tensor-related classes in TensorFlow.js and the methods we expose on them. </p> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:Tensor" href="#class:Tensor">tf.Tensor</a> <span class="signature"> <span>extends TensorInfo</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L257-L505" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A <a href="#class:Tensor">tf.Tensor</a> object represents an immutable, multidimensional array of numbers that has a shape and a data type.</p> <p>For performance reasons, functions that create tensors do not necessarily perform a copy of the data passed to them (e.g. if the data is passed as a <code>Float32Array</code>), and changes to the data will change the tensor. This is not a feature and is not supported.
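</p> <p>For instance (a minimal sketch; whether the backing buffer is shared this way depends on the backend):</p> <pre class="hljs"><code class="hljs language-js">const data = new Float32Array([1, 2, 3]);
const t = tf.tensor1d(data);
data[0] = 99; // may silently change the values seen through t (not supported)
</code></pre> <p>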
To avoid this behavior, use the tensor before changing the input data or create a copy with <code>copy = tf.add(yourTensor, 0)</code>.</p> <p>See <a href="#tensor">tf.tensor()</a> for details on how to create a <a href="#class:Tensor">tf.Tensor</a>.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.buffer" href="#tf.Tensor.buffer"> buffer</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L307-L310" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a promise of <a href="#class:TensorBuffer">tf.TensorBuffer</a> that holds the underlying data.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:TensorBuffer">tf.TensorBuffer</a>&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.bufferSync" href="#tf.Tensor.bufferSync"> bufferSync</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L316-L318" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a <a href="#class:TensorBuffer">tf.TensorBuffer</a> that holds the underlying data.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:TensorBuffer">tf.TensorBuffer</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.array" href="#tf.Tensor.array"> array</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L326-L330" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the tensor data as a nested array. The transfer of data is done asynchronously.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;number[]&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.arraySync" href="#tf.Tensor.arraySync"> arraySync</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L338-L342" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the tensor data as a nested array. The transfer of data is done synchronously.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">number[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.data" href="#tf.Tensor.data"> data</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L350-L364" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Asynchronously downloads the values from the <a href="#class:Tensor">tf.Tensor</a>. 
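</p> <p>A minimal sketch of reading the values back (assuming a small 1-D tensor):</p> <pre class="hljs"><code class="hljs language-js">const t = tf.tensor1d([1, 2, 3]);
const values = await t.data(); // Float32Array [1, 2, 3]
</code></pre> <p>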
Returns a promise of <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a> that resolves when the computation has finished.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;DataTypeMap[NumericDataType]&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.dataToGPU" href="#tf.Tensor.dataToGPU"> dataToGPU</a> <span class="signature">(options?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L400-L403" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Copies the tensor's data to a new GPU resource. Compared with <code>dataSync()</code> and <code>data()</code>, this method prevents the data from being downloaded to the CPU.</p> <p>For the WebGL backend, the data will be stored on a densely packed texture. This means that the texture will use the RGBA channels to store values.</p> <p>For the WebGPU backend, the data will be stored on a buffer. There is no size parameter, so a user-defined size cannot be used to create the buffer.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">options</span> <span class="param-type">(DataToGPUOptions)</span> <span class="param-docs">For WebGL: <ul> <li>customTexShape: Optional. If set, the user-defined texture shape will be used to create the texture.</li> </ul> </span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GPUData</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.dataSync" href="#tf.Tensor.dataSync"> dataSync</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L411-L425" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Synchronously downloads the values from the <a href="#class:Tensor">tf.Tensor</a>.
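</p> <p>A minimal sketch:</p> <pre class="hljs"><code class="hljs language-js">const values = tf.tensor1d([1, 2, 3]).dataSync(); // Float32Array [1, 2, 3]
</code></pre> <p>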
This blocks the UI thread until the values are ready, which can cause performance issues.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">DataTypeMap[NumericDataType]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.dispose" href="#tf.Tensor.dispose"> dispose</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L443-L452" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Disposes <a href="#class:Tensor">tf.Tensor</a> from memory.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.print" href="#tf.Tensor.print"> print</a> <span class="signature">(verbose?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L473-L475" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Prints the <a href="#class:Tensor">tf.Tensor</a>. See <a href="#print">tf.print()</a> for details.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to print verbose information about the tensor, including dtype and size.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.clone" href="#tf.Tensor.clone"> clone</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L481-L484" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a copy of the tensor. See <a href="#clone">tf.clone()</a> for details.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Tensor.toString" href="#tf.Tensor.toString"> toString</a> <span class="signature">(verbose?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L491-L494" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a human-readable description of the tensor. 
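</p> <p>A minimal sketch:</p> <pre class="hljs"><code class="hljs language-js">const t = tf.tensor2d([1, 2, 3, 4], [2, 2]);
console.log(t.toString()); // logs a readable summary of the tensor
</code></pre> <p>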
Useful for logging.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">string</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:Variable" href="#class:Variable">tf.Variable</a> <span class="signature"> <span>extends <a href="#class:Tensor">tf.Tensor</a></span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L564-L603" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A mutable <a href="#class:Tensor">tf.Tensor</a>, useful for persisting state, e.g. for training.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Variable.assign" href="#tf.Variable.assign"> assign</a> <span class="signature">(newValue)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L583-L597" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Assign a new <a href="#class:Tensor">tf.Tensor</a> to this variable. The new <a href="#class:Tensor">tf.Tensor</a> must have the same shape and dtype as the old <a href="#class:Tensor">tf.Tensor</a>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">newValue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">New tensor to be assigned to this variable.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:TensorBuffer" href="#class:TensorBuffer">tf.TensorBuffer</a> <span class="signature"> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L45-L161" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A mutable object, similar to <a href="#class:Tensor">tf.Tensor</a>, that allows users to set values at locations before converting to an immutable <a href="#class:Tensor">tf.Tensor</a>.</p> <p>See <a href="#buffer">tf.buffer()</a> for creating a tensor buffer.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.TensorBuffer.set" href="#tf.TensorBuffer.set"> set</a> <span class="signature">(value, ...locs)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L80-L91" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Sets a value in the buffer at a given location.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">value</span> <span class="param-type">(SingleValueMap[D])</span> <span class="param-docs">The value to set.</span> </li> <li class="parameter"> 
<span class="param-name">...locs</span> <span class="param-type">(number[])</span> <span class="param-docs">The location indices.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.TensorBuffer.get" href="#tf.TensorBuffer.get"> get</a> <span class="signature">(...locs)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L100-L118" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the value in the buffer at the provided location.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">...locs</span> <span class="param-type">(number[])</span> <span class="param-docs">The location indices.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SingleValueMap[D]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.TensorBuffer.toTensor" href="#tf.TensorBuffer.toTensor"> toTensor</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/tensor.ts#L157-L160" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates an immutable <a href="#class:Tensor">tf.Tensor</a> object from the buffer.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> </div> </div> <div class="subheading"> <div class="title"> <a name="Tensors-Transformations" href="#Tensors-Transformations" class="symbol-link"> Tensors / Transformations </a> </div> <div class="description"> <p>This section describes some common Tensor transformations for reshaping and type-casting.</p> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="batchToSpaceND" href="#batchToSpaceND"> tf.batchToSpaceND</a> <span class="signature">(x, blockShape, crops)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/batch_to_space_nd.ts#L77-L105" target=_blank>Source</a> </span> </div> <div class="documentation"><p>This operation reshapes the &quot;batch&quot; dimension 0 into <code>M + 1</code> dimensions of shape <code>blockShape + [batch]</code>, interleaves these blocks back into the grid defined by the spatial dimensions <code>[1, ..., M]</code>, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to <code>crops</code> to produce the output. This is the reverse of <a href="#spaceToBatchND">tf.spaceToBatchND()</a>. 
See below for a precise description.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor4d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> blockShape = [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]; <span class="hljs-keyword">const</span> crops = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]; x.<span class="hljs-title function_">batchToSpaceND</span>(blockShape, crops).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A <a href="#class:Tensor">tf.Tensor</a>. N-D with <code>x.shape</code> = <code>[batch] + spatialShape + remainingShape</code>, where spatialShape has <code>M</code> dimensions.</span> </li> <li class="parameter"> <span class="param-name">blockShape</span> <span class="param-type">(number[])</span> <span class="param-docs">A 1-D array. Must have shape <code>[M]</code>, all values must be &gt;= 1.</span> </li> <li class="parameter"> <span class="param-name">crops</span> <span class="param-type">(number[][])</span> <span class="param-docs">A 2-D array. Must have shape <code>[M, 2]</code>, all values must be &gt;= 0. <code>crops[i] = [cropStart, cropEnd]</code> specifies the amount to crop from input dimension <code>i + 1</code>, which corresponds to spatial dimension <code>i</code>. 
It is required that <code>cropStart[i] + cropEnd[i] &lt;= blockShape[i] * inputShape[i + 1]</code>.</p> <p>This operation is equivalent to the following steps:</p> <ol> <li> <p>Reshape <code>x</code> to <code>reshaped</code> of shape: <code>[blockShape[0], ..., blockShape[M-1], batch / prod(blockShape), x.shape[1], ..., x.shape[N-1]]</code></p> </li> <li> <p>Permute dimensions of <code>reshaped</code> to produce <code>permuted</code> of shape <code>[batch / prod(blockShape), x.shape[1], blockShape[0], ..., x.shape[M], blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]</code></p> </li> <li> <p>Reshape <code>permuted</code> to produce <code>reshapedPermuted</code> of shape <code>[batch / prod(blockShape), x.shape[1] * blockShape[0], ..., x.shape[M] * blockShape[M-1], x.shape[M+1], ..., x.shape[N-1]]</code></p> </li> <li> <p>Crop the start and end of dimensions <code>[1, ..., M]</code> of <code>reshapedPermuted</code> according to <code>crops</code> to produce the output of shape: <code>[batch / prod(blockShape), x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1], ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] - crops[M-1,1], x.shape[M+1], ..., x.shape[N-1]]</code></p> </li> </ol> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="broadcastArgs" href="#broadcastArgs"> tf.broadcastArgs</a> <span class="signature">(s0, s1)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/broadcast_args.ts#L41-L60" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the shape of <code>s0 op s1</code> with broadcast.</p> <p>Computes <code>r0</code>, the broadcasted shape, as a tensor.
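</p> <p>For example, broadcasting shapes <code>[2, 1]</code> and <code>[1, 3]</code> yields <code>[2, 3]</code> (a minimal sketch; passing the shapes as <code>'int32'</code> tensors is an assumption, plain arrays should also be accepted):</p> <pre class="hljs"><code class="hljs language-js">const s0 = tf.tensor1d([2, 1], 'int32');
const s1 = tf.tensor1d([1, 3], 'int32');
tf.broadcastArgs(s0, s1).print(); // [2, 3]
</code></pre> <p>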
s0, s1 and r0 are all integer vectors.</p> <p>This function returns the shape of the result of an operation between two tensors of size s0 and s1 performed with broadcast.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">s0</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a> | <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor representing a shape</span> </li> <li class="parameter"> <span class="param-name">s1</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a> | <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor representing a shape</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="broadcastTo" href="#broadcastTo"> tf.broadcastTo</a> <span class="signature">(x, shape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/broadcast_to.ts#L45-L87" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Broadcast an array to a compatible shape NumPy-style.</p> <p>The tensor's shape is compared to the broadcast shape from end to beginning. Ones are prepended to the tensor's shape until it has the same length as the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th axis is already broadcast-compatible. 
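</p> <p>For example, broadcasting a shape <code>[3]</code> tensor to <code>[2, 3]</code> (a minimal sketch):</p> <pre class="hljs"><code class="hljs language-js">tf.broadcastTo(tf.tensor1d([1, 2, 3]), [2, 3]).print();
// [[1, 2, 3],
//  [1, 2, 3]]
</code></pre> <p>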
If input.shape[i]==1 and shape[i]==N, then the input tensor is tiled N times along that axis (using tf.tile).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">The input is to be broadcast to this shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="cast" href="#cast"> tf.cast</a> <span class="signature">(x, dtype)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/cast.ts#L40-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Casts a <a href="#class:Tensor">tf.Tensor</a> to a new dtype.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1.5</span>, <span class="hljs-number">2.5</span>, <span class="hljs-number">3</span>]); tf.<span class="hljs-title function_">cast</span>(x, <span class="hljs-string">&#x27;int32&#x27;</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to be casted.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The dtype to cast the input tensor to.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="depthToSpace" href="#depthToSpace"> tf.depthToSpace</a> <span class="signature">(x, blockSize, dataFormat?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/depth_to_space.ts#L66-L103" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Rearranges data from depth into blocks of spatial data. More specifically, this op outputs a copy of the input tensor where values from the <code>depth</code> dimension are moved in spatial blocks to the <code>height</code> and <code>width</code> dimensions. 
The attr <code>blockSize</code> indicates the input block size and how the data is moved.</p> <ul> <li> <p>Chunks of data of size <code>blockSize * blockSize</code> from depth are rearranged into non-overlapping blocks of size <code>blockSize x blockSize</code></p> </li> <li> <p>The width of the output tensor is <code>inputWidth * blockSize</code>, whereas the height is <code>inputHeight * blockSize</code></p> </li> <li> <p>The Y, X coordinates within each block of the output image are determined by the high order component of the input channel index</p> </li> <li> <p>The depth of the input tensor must be divisible by <code>blockSize * blockSize</code></p> </li> </ul> <p>The <code>dataFormat</code> attr specifies the layout of the input and output tensors with the following options: &quot;NHWC&quot;: [ <code>batch, height, width, channels</code> ] &quot;NCHW&quot;: [ <code>batch, channels, height, width</code> ]</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor4d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> blockSize = <span class="hljs-number">2</span>; <span class="hljs-keyword">const</span> dataFormat = <span class="hljs-string">&quot;NHWC&quot;</span>; tf.<span class="hljs-title function_">depthToSpace</span>(x, blockSize, dataFormat).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor of rank 4</span> </li> <li class="parameter"> <span class="param-name">blockSize</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NHWC'|'NCHW')</span> <span class="param-docs">An optional string from: &quot;NHWC&quot;, &quot;NCHW&quot;. Defaults to &quot;NHWC&quot;</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="ensureShape" href="#ensureShape"> tf.ensureShape</a> <span class="signature">(x, shape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/ensure_shape.ts#L50-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Checks that the input tensor matches the given shape.</p> <p>Given an input tensor, returns a new tensor with the same values as the input tensor with shape <code>shape</code>.</p> <p>The method supports the null value in the tensor.
It will still check the shapes, and null is a placeholder.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-literal">null</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> z = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>,<span class="hljs-number">2</span>]); tf.<span class="hljs-title function_">ensureShape</span>(x, [<span class="hljs-number">4</span>]).<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">ensureShape</span>(y, [<span class="hljs-number">4</span>]).<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">ensureShape</span>(z, [<span class="hljs-literal">null</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The input tensor to be ensured.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">A TensorShape representing the shape of this tensor, an array or null.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="expandDims" href="#expandDims"> tf.expandDims</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/expand_dims.ts#L45-L56" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a <a href="#class:Tensor">tf.Tensor</a> that has expanded rank, by inserting a dimension into the tensor's shape.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">expandDims</span>(axis).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor whose dimensions are 
to be expanded.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension index at which to insert shape of <code>1</code>. Defaults to 0 (the first dimension).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="mirrorPad" href="#mirrorPad"> tf.mirrorPad</a> <span class="signature">(x, paddings, mode)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/mirror_pad.ts#L54-L90" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Pads a <a href="#class:Tensor">tf.Tensor</a> using mirror padding.</p> <p>This operation implements the <code>REFLECT</code> and <code>SYMMETRIC</code> modes of pad.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">range</span>(<span class="hljs-number">0</span>, <span class="hljs-number">9</span>).<span class="hljs-title function_">reshape</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">mirrorPad</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]], <span class="hljs-string">&#x27;reflect&#x27;</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to pad.</span> </li> <li class="parameter"> <span class="param-name">paddings</span> <span class="param-type">(Array)</span> <span class="param-docs">An array of length <code>R</code> (the rank of the tensor), where each element is a length-2 tuple of ints <code>[padBefore, padAfter]</code>, specifying how much to pad along each dimension of the tensor. In &quot;reflect&quot; mode, the padded regions do not include the borders, while in &quot;symmetric&quot; mode the padded regions do include the borders. For example, if the input is <code>[1, 2, 3]</code> and paddings is <code>[0, 2]</code>, then the output is <code>[1, 2, 3, 2, 1]</code> in &quot;reflect&quot; mode, and <code>[1, 2, 3, 3, 2]</code> in &quot;symmetric&quot; mode. If <code>mode</code> is &quot;reflect&quot; then both <code>paddings[D, 0]</code> and <code>paddings[D, 1]</code> must be no greater than <code>x.shape[D] - 1</code>. 
If mode is &quot;symmetric&quot; then both <code>paddings[D, 0]</code> and <code>paddings[D, 1]</code> must be no greater than <code>x.shape[D]</code></span> </li> <li class="parameter"> <span class="param-name">mode</span> <span class="param-type">('reflect'|'symmetric')</span> <span class="param-docs">String to specify padding mode. Can be <code>'reflect' | 'symmetric'</code></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="pad" href="#pad"> tf.pad</a> <span class="signature">(x, paddings, constantValue?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/pad.ts#L53-L65" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Pads a <a href="#class:Tensor">tf.Tensor</a> with a given value and paddings.</p> <p>This operation implements <code>CONSTANT</code> mode. For <code>REFLECT</code> and <code>SYMMETRIC</code>, refer to <a href="#mirrorPad">tf.mirrorPad()</a>.</p> <p>Also available are stricter rank-specific methods with the same signature as this method that assert that <code>paddings</code> is of given length.</p> <ul> <li><code>tf.pad1d</code></li> <li><code>tf.pad2d</code></li> <li><code>tf.pad3d</code></li> <li><code>tf.pad4d</code></li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">pad</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to pad.</span> </li> <li class="parameter"> <span class="param-name">paddings</span> <span class="param-type">(Array)</span> <span class="param-docs">An array of length <code>R</code> (the rank of the tensor), where each element is a length-2 tuple of ints <code>[padBefore, padAfter]</code>, specifying how much to pad along each dimension of the tensor.</span> </li> <li class="parameter"> <span class="param-name">constantValue</span> <span class="param-type">(number)</span> <span class="param-docs">The pad value to use. 
Defaults to 0.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="reshape" href="#reshape"> tf.reshape</a> <span class="signature">(x, shape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/reshape.ts#L54-L63" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Reshapes a <a href="#class:Tensor">tf.Tensor</a> to a given shape.</p> <p>Given an input tensor, returns a new tensor with the same values as the input tensor with shape <code>shape</code>.</p> <p>If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of [-1] flattens into 1-D. At most one component of shape can be -1.</p> <p>If shape is 1-D or higher, then the operation returns a tensor with shape shape filled with the values of tensor. In this case, the number of elements implied by shape must be the same as the number of elements in tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">reshape</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to be reshaped.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="setdiff1dAsync" href="#setdiff1dAsync"> tf.setdiff1dAsync</a> <span class="signature">(x, y)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/setdiff1d_async.ts#L51-L88" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the difference between two lists of numbers.</p> <p>Given a Tensor <code>x</code> and a Tensor <code>y</code>, this operation returns a Tensor <code>out</code> that represents all values that are in <code>x</code> but not in <code>y</code>. The returned Tensor <code>out</code> is sorted in the same order that the numbers appear in <code>x</code> (duplicates are preserved). This operation also returns a Tensor indices that represents the position of each out element in <code>x</code>. 
In other words:</p> <p><code>out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]; <span class="hljs-keyword">const</span> y = [<span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">5</span>]; <span class="hljs-keyword">const</span> [out, indices] = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">setdiff1dAsync</span>(x, y); out.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [2, 4, 6]</span> indices.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 3, 5]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1-D Tensor. Values to keep.</span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1-D Tensor. Must have the same type as x. Values to exclude in the output.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;[<a href="#class:Tensor">tf.Tensor</a>, <a href="#class:Tensor">tf.Tensor</a>]&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="spaceToBatchND" href="#spaceToBatchND"> tf.spaceToBatchND</a> <span class="signature">(x, blockShape, paddings)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/space_to_batch_nd.ts#L77-L113" target=_blank>Source</a> </span> </div> <div class="documentation"><p>This operation divides &quot;spatial&quot; dimensions <code>[1, ..., M]</code> of the input into a grid of blocks of shape <code>blockShape</code>, and interleaves these blocks with the &quot;batch&quot; dimension (0) such that in the output, the spatial dimensions <code>[1, ..., M]</code> correspond to the position within the grid, and the batch dimension combines both the position within a spatial block and the original batch position. Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to <code>paddings</code>. 
See below for a precise description.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor4d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> blockShape = [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]; <span class="hljs-keyword">const</span> paddings = [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]; x.<span class="hljs-title function_">spaceToBatchND</span>(blockShape, paddings).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A <a href="#class:Tensor">tf.Tensor</a>. N-D with <code>x.shape</code> = <code>[batch] + spatialShape + remainingShape</code>, where spatialShape has <code>M</code> dimensions.</span> </li> <li class="parameter"> <span class="param-name">blockShape</span> <span class="param-type">(number[])</span> <span class="param-docs">A 1-D array. Must have shape <code>[M]</code>, all values must be &gt;= 1.</span> </li> <li class="parameter"> <span class="param-name">paddings</span> <span class="param-type">(number[][])</span> <span class="param-docs">A 2-D array. Must have shape <code>[M, 2]</code>, all values must be &gt;= 0. <code>paddings[i] = [padStart, padEnd]</code> specifies the amount to zero-pad from input dimension <code>i + 1</code>, which corresponds to spatial dimension <code>i</code>. 
It is required that <code>(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0</code></p> <p>This operation is equivalent to the following steps:</p> <ol> <li> <p>Zero-pad the start and end of dimensions <code>[1, ..., M]</code> of the input according to <code>paddings</code> to produce <code>padded</code> of shape paddedShape.</p> </li> <li> <p>Reshape <code>padded</code> to <code>reshapedPadded</code> of shape: <code>[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ..., paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape</code></p> </li> <li> <p>Permute dimensions of <code>reshapedPadded</code> to produce <code>permutedReshapedPadded</code> of shape: <code>blockShape + [batch] + [paddedShape[1] / blockShape[0], ..., paddedShape[M] / blockShape[M-1]] + remainingShape</code></p> </li> <li> <p>Reshape <code>permutedReshapedPadded</code> to flatten <code>blockShape</code> into the batch dimension, producing an output tensor of shape: <code>[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ..., paddedShape[M] / blockShape[M-1]] + remainingShape</code></p> </li> </ol> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="squeeze" href="#squeeze"> tf.squeeze</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/squeeze.ts#L41-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Removes dimensions of size 1 from the shape of a <a href="#class:Tensor">tf.Tensor</a>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">squeeze</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to be squeezed.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number[])</span> <span class="param-docs">An optional list of numbers. If specified, only squeezes the dimensions listed. The dimension index starts at 0. 
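For instance, a minimal sketch that squeezes only the second dimension of a <code>[1, 1, 4]</code> tensor: <pre class="hljs"><code class="hljs language-js">const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
// Squeeze only axis 1; the result has shape [1, 4].
x.squeeze([1]).print();</code></pre>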
It is an error to squeeze a dimension that is not 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Tensors-Slicing and Joining" href="#Tensors-Slicing and Joining" class="symbol-link"> Tensors / Slicing and Joining </a> </div> <div class="description"> <p>TensorFlow.js provides several operations to slice or extract parts of a tensor, or join multiple tensors together. </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="booleanMaskAsync" href="#booleanMaskAsync"> tf.booleanMaskAsync</a> <span class="signature">(tensor, mask, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/boolean_mask.ts#L46-L88" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Apply boolean mask to tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> tensor = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> mask = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> result = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">booleanMaskAsync</span>(tensor, mask); result.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">tensor</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">N-D tensor.</span> </li> <li class="parameter"> <span class="param-name">mask</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">K-D boolean tensor, K &lt;= N and K must be known statically.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">A 0-D int Tensor representing the axis in tensor to mask from. By default, axis is 0 which will mask from the first dimension. 
Otherwise K + axis &lt;= N.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="concat" href="#concat"> tf.concat</a> <span class="signature">(tensors, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/concat.ts#L69-L94" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Concatenates a list of <a href="#class:Tensor">tf.Tensor</a>s along a given axis.</p> <p>The tensors ranks and types must match, and their sizes must match in all dimensions except <code>axis</code>.</p> <p>Also available are stricter rank-specific methods that assert that <code>tensors</code> are of the given rank:</p> <ul> <li><code>tf.concat1d</code></li> <li><code>tf.concat2d</code></li> <li><code>tf.concat3d</code></li> <li><code>tf.concat4d</code></li> </ul> <p>Except <code>tf.concat1d</code> (which does not have axis param), all methods have same signature as this method.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">concat</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or a.concat(b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); tf.<span class="hljs-title function_">concat</span>([a, b, c]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">30</span>, <span class="hljs-number">40</span>]]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; tf.<span class="hljs-title function_">concat</span>([a, b], axis).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">tensors</span> <span class="param-type">(Array)</span> <span class="param-docs">A list of tensors to concatenate.</span> </li> <li 
class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis to concatenate along. Defaults to 0 (the first dim).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="gather" href="#gather"> tf.gather</a> <span class="signature">(x, indices, axis?, batchDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/gather.ts#L54-L65" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Gather slices from tensor <code>x</code>'s axis <code>axis</code> according to <code>indices</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); x.<span class="hljs-title function_">gather</span>(indices).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); x.<span class="hljs-title function_">gather</span>(indices).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor whose slices are to be gathered.</span> </li> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The indices of the values to extract.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis over which to select values. 
Defaults to 0.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">batchDims</span> <span class="param-type">(number)</span> <span class="param-docs">Optional. The number of batch dimensions. It must be less than or equal to rank(indices). Defaults to 0. The output tensor will have shape of <code>x.shape[:axis] + indices.shape[batchDims:] + x.shape[axis + 1:]</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="reverse" href="#reverse"> tf.reverse</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/reverse.ts#L59-L69" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Reverses a <a href="#class:Tensor">tf.Tensor</a> along a specified axis.</p> <p>Also available are stricter rank-specific methods that assert that <code>x</code> is of the given rank:</p> <ul> <li><code>tf.reverse1d</code></li> <li><code>tf.reverse2d</code></li> <li><code>tf.reverse3d</code></li> <li><code>tf.reverse4d</code></li> </ul> <p>Except <code>tf.reverse1d</code> (which does not have axis param), all methods have same signature as this method.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">reverse</span>().<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">reverse</span>(axis).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to be reversed.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The set of dimensions to reverse. Must be in the range [-rank(x), rank(x)). 
Defaults to all axes.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="slice" href="#slice"> tf.slice</a> <span class="signature">(x, begin, size?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/slice.ts#L62-L76" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Extracts a slice from a <a href="#class:Tensor">tf.Tensor</a> starting at coordinates <code>begin</code> and is of size <code>size</code>.</p> <p>Also available are stricter rank-specific methods with the same signature as this method that assert that <code>x</code> is of the given rank:</p> <ul> <li><code>tf.slice1d</code></li> <li><code>tf.slice2d</code></li> <li><code>tf.slice3d</code></li> <li><code>tf.slice4d</code></li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">slice</span>([<span class="hljs-number">1</span>], [<span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">slice</span>([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input <a href="#class:Tensor">tf.Tensor</a> to slice from.</span> </li> <li class="parameter"> <span class="param-name">begin</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The coordinates to start the slice from. The length can be less than the rank of x - the rest of the axes will have implicit 0 as start. Can also be a single number, in which case it specifies the first axis.</span> </li> <li class="parameter"> <span class="param-name">size</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The size of the slice. The length can be less than the rank of x - the rest of the axes will have implicit -1. A value of -1 requests the rest of the dimensions in the axis. 
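For instance, a minimal sketch of using -1 to take the remainder of an axis on a 3x2 tensor: <pre class="hljs"><code class="hljs language-js">const x = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
// Start at row 1 and take all remaining rows (-1) and both columns.
x.slice([1, 0], [-1, 2]).print();  // [[3, 4], [5, 6]]</code></pre>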
Can also be a single number, in which case it specifies the size of the first axis.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="split" href="#split"> tf.split</a> <span class="signature">(x, numOrSizeSplits, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/split.ts#L62-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Splits a <a href="#class:Tensor">tf.Tensor</a> into sub tensors.</p> <p>If <code>numOrSizeSplits</code> is a number, splits <code>x</code> along dimension <code>axis</code> into <code>numOrSizeSplits</code> smaller tensors. Requires that <code>numOrSizeSplits</code> evenly divides <code>x.shape[axis]</code>.</p> <p>If <code>numOrSizeSplits</code> is a number array, splits <code>x</code> into <code>numOrSizeSplits.length</code> pieces. The shape of the <code>i</code>-th piece has the same size as <code>x</code> except along dimension <code>axis</code> where the size is <code>numOrSizeSplits[i]</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> [a, b] = tf.<span class="hljs-title function_">split</span>(x, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>); a.<span class="hljs-title function_">print</span>(); b.<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> [c, d, e] = tf.<span class="hljs-title function_">split</span>(x, [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], <span class="hljs-number">1</span>); c.<span class="hljs-title function_">print</span>(); d.<span class="hljs-title function_">print</span>(); e.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to split.</span> </li> <li class="parameter"> <span class="param-name">numOrSizeSplits</span> <span class="param-type">(number[]|number)</span> <span class="param-docs">Either an integer indicating the number of splits along the axis or an array of integers containing the sizes of each output tensor along the axis. If a number then it must evenly divide <code>x.shape[axis]</code>; otherwise the sum of sizes must match <code>x.shape[axis]</code>. 
Can contain one -1 indicating that dimension is to be inferred.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension along which to split. Defaults to 0 (the first dim).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="stack" href="#stack"> tf.stack</a> <span class="signature">(tensors, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/stack.ts#L44-L63" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Stacks a list of rank-<code>R</code> <a href="#class:Tensor">tf.Tensor</a>s into one rank-<code>(R+1)</code> <a href="#class:Tensor">tf.Tensor</a>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); tf.<span class="hljs-title function_">stack</span>([a, b, c]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">tensors</span> <span class="param-type">(Array)</span> <span class="param-docs">A list of tensor objects with the same shape and dtype.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis to stack along. Defaults to 0 (the first dim).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tile" href="#tile"> tf.tile</a> <span class="signature">(x, reps)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tile.ts#L54-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Construct a tensor by repeating it the number of times given by reps.</p> <p>This operation creates a new tensor by replicating <code>input</code> <code>reps</code> times. The output tensor's <code>i</code>th dimension has <code>input.shape[i] * reps[i]</code> elements, and the values of <code>input</code> are replicated <code>reps[i]</code> times along the <code>i</code>th dimension. 
For example, tiling <code>[a, b, c, d]</code> by <code>[2]</code> produces <code>[a, b, c, d, a, b, c, d]</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">tile</span>([<span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.tile(a, [2])</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">tile</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.tile(a, [1,2])</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to tile.</span> </li> <li class="parameter"> <span class="param-name">reps</span> <span class="param-type">(number[])</span> <span class="param-docs">Determines the number of replications per dimension.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="unstack" href="#unstack"> tf.unstack</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/unstack.ts#L43-L56" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Unstacks a <a href="#class:Tensor">tf.Tensor</a> of rank-<code>R</code> into a list of rank-<code>(R-1)</code> <a href="#class:Tensor">tf.Tensor</a>s.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); tf.<span class="hljs-title function_">unstack</span>(a).<span class="hljs-title function_">forEach</span>(<span class="hljs-function"><span class="hljs-params">tensor</span> =&gt;</span> tensor.<span class="hljs-title function_">print</span>()); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor object.</span> </li> <li 
class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis to unstack along. Defaults to 0 (the first dim).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Tensors-Matrices" href="#Tensors-Matrices" class="symbol-link"> Tensors / Matrices </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="einsum" href="#einsum"> tf.einsum</a> <span class="signature">(equation, ...tensors)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/einsum.ts#L103-L110" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Tensor contraction over specified indices and outer product.</p> <p><code>einsum</code> allows defining Tensors by defining their element-wise computation. This computation is based on <a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Einstein_notation">Einstein summation</a>.</p> <p>Some special cases include:</p> <p>Matrix multiplication:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">5</span>]]); x.<span class="hljs-title function_">print</span>(); y.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;ij,jk-&gt;ik&#x27;</span>, x, y).<span class="hljs-title function_">print</span>(); </code></pre> <p>Dot product:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">print</span>(); y.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;i,i-&gt;&#x27;</span>, x, y).<span class="hljs-title function_">print</span>(); </code></pre> <p>Batch dot product:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]]); <span class="hljs-keyword">const</span> 
y = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>]]); x.<span class="hljs-title function_">print</span>(); y.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;bi,bi-&gt;b&#x27;</span>, x, y).<span class="hljs-title function_">print</span>(); </code></pre> <p>Outer product:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">5</span>]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>]); x.<span class="hljs-title function_">print</span>(); y.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;i,j-&gt;ij&#x27;</span>, x, y).<span class="hljs-title function_">print</span>(); </code></pre> <p>Matrix transpose:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); x.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;ij-&gt;ji&#x27;</span>, x).<span class="hljs-title function_">print</span>(); </code></pre> <p>Batch matrix transpose:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor3d</span>([[[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]], [[-<span class="hljs-number">1</span>, -<span class="hljs-number">2</span>], [-<span class="hljs-number">3</span>, -<span class="hljs-number">4</span>]]]); x.<span class="hljs-title function_">print</span>(); tf.<span class="hljs-title function_">einsum</span>(<span class="hljs-string">&#x27;bij-&gt;bji&#x27;</span>, x).<span class="hljs-title function_">print</span>(); </code></pre> <p>Limitations:</p> <p>This implementation of einsum has the following limitations:</p> <ul> <li>Does not support &gt;2 input tensors.</li> <li>Does not support duplicate axes for any given input tensor.
E.g., equation 'ii-&gt;' is not supported.</li> <li>The <code>...</code> notation is not supported.</li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">equation</span> <span class="param-type">(string)</span> <span class="param-docs">a string describing the contraction, in the same format as <a target="_blank" rel="noopener" href="https://numpy.org/doc/stable/reference/generated/numpy.einsum.html">numpy.einsum</a>.</span> </li> <li class="parameter"> <span class="param-name">...tensors</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">the input(s) to contract (each one a Tensor), whose shapes should be consistent with equation.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Tensors-Random" href="#Tensors-Random" class="symbol-link"> Tensors / Random </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="multinomial" href="#multinomial"> tf.multinomial</a> <span class="signature">(logits, numSamples, seed?, normalized?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/multinomial.ts#L49-L81" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values drawn from a multinomial distribution.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> probs = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">.75</span>, <span class="hljs-number">.25</span>]); tf.<span class="hljs-title function_">multinomial</span>(probs, <span class="hljs-number">3</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">logits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1D array with unnormalized log-probabilities, or 2D array of shape <code>[batchSize, numOutcomes]</code>. See the <code>normalized</code> parameter.</span> </li> <li class="parameter"> <span class="param-name">numSamples</span> <span class="param-type">(number)</span> <span class="param-docs">Number of samples to draw for each row slice.</span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">The seed number.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">normalized</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the provided <code>logits</code> are normalized true probabilities (sum to 1). 
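As a minimal sketch (the seed value 42 below is arbitrary), passing true here makes the input be treated as probabilities rather than unnormalized logits: <pre class="hljs"><code class="hljs language-js">const probs = tf.tensor1d([0.3, 0.7]);
// numSamples = 5, seed = 42, normalized = true.
tf.multinomial(probs, 5, 42, true).print();</code></pre>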
Defaults to false.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a>|<a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="rand" href="#rand"> tf.rand</a> <span class="signature">(shape, randFunction, dtype?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/rand.ts#L37-L56" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a random number generator function defined by the user.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">randFunction</span> <span class="param-type">(() =&gt; number)</span> <span class="param-docs">A random number generator function which is called for each element in the output tensor.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type of the output tensor. Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="randomGamma" href="#randomGamma"> tf.randomGamma</a> <span class="signature">(shape, alpha, beta?, dtype?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/random_gamma.ts#L42-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a gamma distribution.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">randomGamma</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], <span class="hljs-number">1</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">The shape parameter of the gamma distribution.</span> </li> <li class="parameter"> <span class="param-name">beta</span> <span class="param-type">(number)</span> <span class="param-docs">The inverse scale parameter of the gamma distribution. Defaults to 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32')</span> <span class="param-docs">The data type of the output. 
Defaults to float32.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">The seed for the random number generator.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="randomNormal" href="#randomNormal"> tf.randomNormal</a> <span class="signature">(shape, mean?, stdDev?, dtype?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/random_normal.ts#L41-L55" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a normal distribution.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">randomNormal</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">mean</span> <span class="param-type">(number)</span> <span class="param-docs">The mean of the normal distribution.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">stdDev</span> <span class="param-type">(number)</span> <span class="param-docs">The standard deviation of the normal distribution.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32')</span> <span class="param-docs">The data type of the output.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">The seed for the random number generator.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="randomStandardNormal" href="#randomStandardNormal"> tf.randomStandardNormal</a> <span class="signature">(shape, dtype?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/random_standard_normal.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a normal distribution.</p> <p>The generated values will have mean 0 and standard deviation 1.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">randomStandardNormal</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li 
class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32')</span> <span class="param-docs">The data type of the output.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">The seed for the random number generator.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="randomUniform" href="#randomUniform"> tf.randomUniform</a> <span class="signature">(shape, minval?, maxval?, dtype?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/random_uniform.ts#L49-L59" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with values sampled from a uniform distribution.</p> <p>The generated values follow a uniform distribution in the range [minval, maxval). The lower bound minval is included in the range, while the upper bound maxval is excluded.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">randomUniform</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">minval</span> <span class="param-type">(number)</span> <span class="param-docs">The lower bound on the range of random values to generate. Defaults to 0.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">maxval</span> <span class="param-type">(number)</span> <span class="param-docs">The upper bound on the range of random values to generate. Defaults to 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data type of the output tensor. Defaults to 'float32'.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number|string)</span> <span class="param-docs">An optional int. Defaults to 0. If seed is set to be non-zero, the random number generator is seeded by the given seed. 
Otherwise, it is seeded by a random seed.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="randomUniformInt" href="#randomUniformInt"> tf.randomUniformInt</a> <span class="signature">(shape, minval, maxval, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/random_uniform_int.ts#L43-L48" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> with integers sampled from a uniform distribution.</p> <p>The generated values are uniform integers in the range [minval, maxval). The lower bound minval is included in the range, while the upper bound maxval is excluded.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-title function_">randomUniformInt</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], <span class="hljs-number">0</span>, <span class="hljs-number">10</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of integers defining the output tensor shape.</span> </li> <li class="parameter"> <span class="param-name">minval</span> <span class="param-type">(number)</span> <span class="param-docs">Inclusive lower bound on the generated integers.</span> </li> <li class="parameter"> <span class="param-name">maxval</span> <span class="param-type">(number)</span> <span class="param-docs">Exclusive upper bound on the generated integers.</span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number|string)</span> <span class="param-docs">An optional int. Defaults to 0. If seed is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Models" href="#Models" class="symbol-link">Models</a> </div> <div class="description"> <p>Models are one of the primary abstractions used in TensorFlow.js Layers. Models can be trained, evaluated, and used for prediction. 
A model's state (topology, and optionally, trained weights) can be restored from various formats.</p> <p>Models are a collection of Layers; see Model Creation for details about how Layers can be connected.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Creation" href="#Models-Creation" class="symbol-link"> Models / Creation </a> </div> <div class="description"> <p>There are two primary ways of creating models.</p> <ul><li>Sequential &mdash; Easiest; works if the model is a simple stack of layers, where each layer's input rests on top of the previous layer's output.</li> <li>Model &mdash; Offers more control if the layers need to be wired together in graph-like ways &mdash; multiple 'towers', layers that skip a layer, etc.</li></ul> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sequential" href="#sequential"> tf.sequential</a> <span class="signature">(config?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports.ts#L134-L136" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Sequential">tf.Sequential</a> model. A sequential model is any model where the outputs of one layer are the inputs to the next layer, i.e. the model topology is a simple 'stack' of layers, with no branching or skipping.</p> <p>This means that the first layer passed to a <a href="#class:Sequential">tf.Sequential</a> model should have a defined input shape. What that means is that it should have received an <code>inputShape</code> or <code>batchInputShape</code> argument, or, for some types of layers (recurrent, Dense, ...), an <code>inputDim</code> argument.</p> <p>The key difference between <a href="#model">tf.model()</a> and <a href="#sequential">tf.sequential()</a> is that <a href="#sequential">tf.sequential()</a> is less generic, supporting only a linear stack of layers. <a href="#model">tf.model()</a> is more generic and supports an arbitrary graph (without cycles) of layers.</p> <p>Examples:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); <span class="hljs-comment">// First layer must have an input shape defined.</span> model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">32</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">50</span>]})); <span class="hljs-comment">// Afterwards, TF.js does automatic shape inference.</span> model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>})); <span class="hljs-comment">// Inspect the inferred shape of the model&#x27;s output, which equals</span> <span class="hljs-comment">// `[null, 4]`. 
The 1st dimension is the undetermined batch dimension; the</span> <span class="hljs-comment">// 2nd is the output size of the model&#x27;s last layer.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(model.<span class="hljs-property">outputs</span>[<span class="hljs-number">0</span>].<span class="hljs-property">shape</span>)); </code></pre> <p>It is also possible to specify a batch size (with potentially undetermined batch dimension, denoted by &quot;null&quot;) for the first layer using the <code>batchInputShape</code> key. The following example is equivalent to the above:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); <span class="hljs-comment">// First layer must have a defined input shape</span> model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">32</span>, <span class="hljs-attr">batchInputShape</span>: [<span class="hljs-literal">null</span>, <span class="hljs-number">50</span>]})); <span class="hljs-comment">// Afterwards, TF.js does automatic shape inference.</span> model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>})); <span class="hljs-comment">// Inspect the inferred shape of the model&#x27;s output.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(model.<span class="hljs-property">outputs</span>[<span class="hljs-number">0</span>].<span class="hljs-property">shape</span>)); </code></pre> <p>You can also use an <code>Array</code> of already-constructed <code>Layer</code>s to create a <a href="#class:Sequential">tf.Sequential</a> model:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>({ <span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">32</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">50</span>]}), tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>})] }); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(model.<span class="hljs-property">outputs</span>[<span class="hljs-number">0</span>].<span class="hljs-property">shape</span>)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">layers</span> <span class="param-type">(<a 
href="#class:layers.Layer">tf.layers.Layer</a>[])</span> <span class="param-docs">Stack of layers for the model.</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">The name of this model.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Sequential">tf.Sequential</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="model" href="#model"> tf.model</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports.ts#L70-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A model is a data structure that consists of <code>Layers</code> and defines inputs and outputs.</p> <p>The key difference between <a href="#model">tf.model()</a> and <a href="#sequential">tf.sequential()</a> is that <a href="#model">tf.model()</a> is more generic, supporting an arbitrary graph (without cycles) of layers. <a href="#sequential">tf.sequential()</a> is less generic and supports only a linear stack of layers.</p> <p>When creating a <a href="#class:LayersModel">tf.LayersModel</a>, specify its input(s) and output(s). Layers are used to wire input(s) to output(s).</p> <p>For example, the following code snippet defines a model consisting of two <code>dense</code> layers, with 10 and 4 units, respectively.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Define input, which has a size of 5 (not including batch dimension).</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">5</span>]}); <span class="hljs-comment">// First dense layer uses relu activation.</span> <span class="hljs-keyword">const</span> denseLayer1 = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">10</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;relu&#x27;</span>}); <span class="hljs-comment">// Second dense layer uses softmax activation.</span> <span class="hljs-keyword">const</span> denseLayer2 = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;softmax&#x27;</span>}); <span class="hljs-comment">// Obtain the output symbolic tensor by applying the layers on the input.</span> <span class="hljs-keyword">const</span> output = denseLayer2.<span class="hljs-title function_">apply</span>(denseLayer1.<span class="hljs-title function_">apply</span>(input)); <span class="hljs-comment">// Create the model based on the inputs.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">model</span>({<span class="hljs-attr">inputs</span>: input, <span class="hljs-attr">outputs</span>: output}); <span class="hljs-comment">// The model can be used for training, evaluation and prediction.</span> <span class="hljs-comment">// For example, the following line runs prediction with the model on</span> <span class="hljs-comment">// some fake data.</span> model.<span 
class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">2</span>, <span class="hljs-number">5</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>See also: <a href="#sequential">tf.sequential()</a>, <a href="#loadLayersModel">tf.loadLayersModel()</a>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>[])</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">outputs</span> <span class="param-type">(<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>[])</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:LayersModel">tf.LayersModel</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Inputs" href="#Models-Inputs" class="symbol-link"> Models / Inputs </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="input" href="#input"> tf.input</a> <span class="signature">(config)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports.ts#L161-L163" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Used to instantiate an input to a model as a <a href="#class:SymbolicTensor">tf.SymbolicTensor</a>.</p> <p>Users should call the <code>input</code> factory function for consistency with other generator functions.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Defines a simple logistic regression model with 32 dimensional input</span> <span class="hljs-comment">// and 3 dimensional output.</span> <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">32</span>]}); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">3</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;softmax&#x27;</span>}).<span class="hljs-title function_">apply</span>(x); <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">model</span>({<span class="hljs-attr">inputs</span>: x, <span class="hljs-attr">outputs</span>: y}); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">2</span>, <span class="hljs-number">32</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>Note: <code>input</code> is only necessary when using <code>model</code>. 
When using <code>sequential</code>, specify <code>inputShape</code> for the first layer or use <code>inputLayer</code> as the first layer.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">shape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">A shape, not including the batch size. For instance, <code>shape=[32]</code> indicates that the expected input will be batches of 32-dimensional vectors.</span> </li> <li class="parameter config-param"> <span class="param-name">batchShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">A shape tuple (integer), including the batch size. For instance, <code>batchShape=[10, 32]</code> indicates that the expected input will be batches of 10 32-dimensional vectors. <code>batchShape=[null, 32]</code> indicates batches of an arbitrary number of 32-dimensional vectors.</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided.</span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">sparse</span> <span class="param-type">(boolean)</span> <span class="param-docs">A boolean specifying whether the placeholder to be created is sparse.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:SymbolicTensor">tf.SymbolicTensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Loading" href="#Models-Loading" class="symbol-link"> Models / Loading </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="loadGraphModel" href="#loadGraphModel"> tf.loadGraphModel</a> <span class="signature">(modelUrl, options?, tfio?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L624-L642" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Load a graph model given a URL to the model definition.</p> <p>Example of loading MobileNetV2 from a URL and making a prediction with a zeros input:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> modelUrl = <span class="hljs-string">&#x27;https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json&#x27;</span>; <span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadGraphModel</span>(modelUrl); <span class="hljs-keyword">const</span> zeros = tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">1</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>, <span class="hljs-number">3</span>]); model.<span class="hljs-title function_">predict</span>(zeros).<span 
class="hljs-title function_">print</span>(); </code></pre> <p>Example of loading MobileNetV2 from a TF Hub URL and making a prediction with a zeros input:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> modelUrl = <span class="hljs-string">&#x27;https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2&#x27;</span>; <span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadGraphModel</span>(modelUrl, {<span class="hljs-attr">fromTFHub</span>: <span class="hljs-literal">true</span>}); <span class="hljs-keyword">const</span> zeros = tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">1</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>, <span class="hljs-number">3</span>]); model.<span class="hljs-title function_">predict</span>(zeros).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">modelUrl</span> <span class="param-type">(string|io.IOHandler)</span> <span class="param-docs">The url or an <code>io.IOHandler</code> that loads the model.</span> </li> <li class="parameter"> <span class="param-name">options</span> <span class="param-type">(Object)</span> <span class="param-docs">Options for the HTTP request, which allows to send credentials and custom headers.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">requestInit</span> <span class="param-type">(RequestInit)</span> <span class="param-docs">RequestInit (options) for HTTP requests.</p> <p>For detailed information on the supported fields, see <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Request/Request">https://developer.mozilla.org/en-US/docs/Web/API/Request/Request</a></span> </li> <li class="parameter config-param"> <span class="param-name">onProgress</span> <span class="param-type">(OnProgressCallback)</span> <span class="param-docs">Progress callback.</span> </li> <li class="parameter config-param"> <span class="param-name">fetchFunc</span> <span class="param-type">(typeof <a href="#fetch">tf.fetch()</a>)</span> <span class="param-docs">A function used to override the <code>window.fetch</code> function.</span> </li> <li class="parameter config-param"> <span class="param-name">strict</span> <span class="param-type">(boolean)</span> <span class="param-docs">Strict loading model: whether extraneous weights or missing weights should trigger an <code>Error</code>.</p> <p>If <code>true</code>, require that the provided weights exactly match those required by the layers. <code>false</code> means that both extra weights and missing weights will be silently ignored.</p> <p>Default: <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">weightPathPrefix</span> <span class="param-type">(string)</span> <span class="param-docs">Path prefix for weight files, by default this is calculated from the path of the model JSON file.</p> <p>For instance, if the path to the model JSON file is <code>http://localhost/foo/model.json</code>, then the default path prefix will be <code>http://localhost/foo/</code>. 
If a weight file has the path value <code>group1-shard1of2</code> in the weight manifest, then the weight file will be loaded from <code>http://localhost/foo/group1-shard1of2</code> by default. However, if you provide a <code>weightPathPrefix</code> value of <code>http://localhost/foo/alt-weights</code>, then the weight file will be loaded from the path <code>http://localhost/foo/alt-weights/group1-shard1of2</code> instead.</span> </li> <li class="parameter config-param"> <span class="param-name">fromTFHub</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the module or model is to be loaded from TF Hub.</p> <p>Setting this to <code>true</code> allows passing a TF-Hub module URL, omitting the standard model file name and the query parameters.</p> <p>Default: <code>false</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">weightUrlConverter</span> <span class="param-type">((weightFileName: string) =&gt; Promise&lt;string&gt;)</span> <span class="param-docs">An async function to convert weight file name to URL. The weight file names are stored in model.json's weightsManifest.paths field. By default we consider weight files are colocated with the model.json file. For example: model.json URL: https://www.google.com/models/1/model.json group1-shard1of1.bin url: https://www.google.com/models/1/group1-shard1of1.bin</p> <p>With this func you can convert the weight file name to any URL.</span> </li> <li class="parameter config-param"> <span class="param-name">streamWeights</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to stream the model directly to the backend or cache all its weights on CPU first. Useful for large models.</span> </li> <li class="parameter"> <span class="param-name">tfio</span> <span class="param-type">(typeof import(&quot;@tensorflow/tfjs-core/dist/io/io&quot;))</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:GraphModel">tf.GraphModel</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="loadLayersModel" href="#loadLayersModel"> tf.loadLayersModel</a> <span class="signature">(pathOrIOHandler, options?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/models.ts#L248-L270" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Load a model composed of Layer objects, including its topology and optionally weights. See the Tutorial named &quot;How to import a Keras Model&quot; for usage examples.</p> <p>This method is applicable to:</p> <ol> <li>Models created with the <code>tf.layers.*</code>, <a href="#sequential">tf.sequential()</a>, and <a href="#model">tf.model()</a> APIs of TensorFlow.js and later saved with the <a href="#tf.LayersModel.save">tf.LayersModel.save()</a> method.</li> <li>Models converted from Keras or TensorFlow tf.keras using the <a target="_blank" rel="noopener" href="https://github.com/tensorflow/tfjs/tree/master/tfjs-converter">tensorflowjs_converter</a>.</li> </ol> <p>This mode is <em>not</em> applicable to TensorFlow <code>SavedModel</code>s or their converted forms. For those models, use <a href="#loadGraphModel">tf.loadGraphModel()</a>.</p> <p>Example 1. 
Load a model from an HTTP server.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>( <span class="hljs-string">&#x27;https://storage.googleapis.com/tfjs-models/tfjs/iris_v1/model.json&#x27;</span>); model.<span class="hljs-title function_">summary</span>(); </code></pre> <p>Example 2: Save <code>model</code>'s topology and weights to browser <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage">local storage</a>; then load it back.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from original model:&#x27;</span>); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-keyword">const</span> loadedModel = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from loaded model:&#x27;</span>); loadedModel.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>Example 3. 
Saving <code>model</code>'s topology and weights to browser <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API">IndexedDB</a>; then load it back.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from original model:&#x27;</span>); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;indexeddb://my-model-1&#x27;</span>); <span class="hljs-keyword">const</span> loadedModel = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>(<span class="hljs-string">&#x27;indexeddb://my-model-1&#x27;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from loaded model:&#x27;</span>); loadedModel.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>Example 4. 
Load a model from user-selected files from HTML <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file">file input elements</a>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Note: this code snippet will not work without the HTML elements in the</span> <span class="hljs-comment">// page</span> <span class="hljs-keyword">const</span> jsonUpload = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">getElementById</span>(<span class="hljs-string">&#x27;json-upload&#x27;</span>); <span class="hljs-keyword">const</span> weightsUpload = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">getElementById</span>(<span class="hljs-string">&#x27;weights-upload&#x27;</span>); <span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>( tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">browserFiles</span>([jsonUpload.<span class="hljs-property">files</span>[<span class="hljs-number">0</span>], weightsUpload.<span class="hljs-property">files</span>[<span class="hljs-number">0</span>]])); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">pathOrIOHandler</span> <span class="param-type">(string|io.IOHandler)</span> <span class="param-docs">Can be either of the two formats</p> <ol> <li>A string path to the <code>ModelAndWeightsConfig</code> JSON describing the model in the canonical TensorFlow.js format. For file:// (tfjs-node-only), http:// and https:// schemas, the path can be either absolute or relative. The content of the JSON file is assumed to be a JSON object with the following fields and values:</li> </ol> <ul> <li>'modelTopology': A JSON object that can be either of:</li> </ul> <ol> <li>a model architecture JSON consistent with the format of the return value of <code>keras.Model.to_json()</code></li> <li>a full model JSON in the format of <code>keras.models.save_model()</code>.</li> </ol> <ul> <li>'weightsManifest': A TensorFlow.js weights manifest. See the Python converter function <code>save_model()</code> for more details. It is also assumed that model weights can be accessed from relative paths described by the <code>paths</code> fields in weights manifest.</li> </ul> <ol start="2"> <li>A <code>tf.io.IOHandler</code> object that loads model artifacts with its <code>load</code> method.</li> </ol> </span> </li> <li class="parameter"> <span class="param-name">options</span> <span class="param-type">(Object)</span> <span class="param-docs">Optional configuration arguments for the model loading, including:</p> <ul> <li><code>strict</code>: Require that the provided weights exactly match those required by the layers. Default true. Passing false means that both extra weights and missing weights will be silently ignored.</li> <li><code>onProgress</code>: A progress callback of the form: <code>(fraction: number) =&gt; void</code>. 
This callback can be used to monitor the model-loading process.</li> </ul> </span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">requestInit</span> <span class="param-type">(RequestInit)</span> <span class="param-docs">RequestInit (options) for HTTP requests.</p> <p>For detailed information on the supported fields, see <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Request/Request">https://developer.mozilla.org/en-US/docs/Web/API/Request/Request</a></span> </li> <li class="parameter config-param"> <span class="param-name">onProgress</span> <span class="param-type">(OnProgressCallback)</span> <span class="param-docs">Progress callback.</span> </li> <li class="parameter config-param"> <span class="param-name">fetchFunc</span> <span class="param-type">(typeof <a href="#fetch">tf.fetch()</a>)</span> <span class="param-docs">A function used to override the <code>window.fetch</code> function.</span> </li> <li class="parameter config-param"> <span class="param-name">strict</span> <span class="param-type">(boolean)</span> <span class="param-docs">Strict loading model: whether extraneous weights or missing weights should trigger an <code>Error</code>.</p> <p>If <code>true</code>, require that the provided weights exactly match those required by the layers. <code>false</code> means that both extra weights and missing weights will be silently ignored.</p> <p>Default: <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">weightPathPrefix</span> <span class="param-type">(string)</span> <span class="param-docs">Path prefix for weight files, by default this is calculated from the path of the model JSON file.</p> <p>For instance, if the path to the model JSON file is <code>http://localhost/foo/model.json</code>, then the default path prefix will be <code>http://localhost/foo/</code>. If a weight file has the path value <code>group1-shard1of2</code> in the weight manifest, then the weight file will be loaded from <code>http://localhost/foo/group1-shard1of2</code> by default. However, if you provide a <code>weightPathPrefix</code> value of <code>http://localhost/foo/alt-weights</code>, then the weight file will be loaded from the path <code>http://localhost/foo/alt-weights/group1-shard1of2</code> instead.</span> </li> <li class="parameter config-param"> <span class="param-name">fromTFHub</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the module or model is to be loaded from TF Hub.</p> <p>Setting this to <code>true</code> allows passing a TF-Hub module URL, omitting the standard model file name and the query parameters.</p> <p>Default: <code>false</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">weightUrlConverter</span> <span class="param-type">((weightFileName: string) =&gt; Promise&lt;string&gt;)</span> <span class="param-docs">An async function to convert weight file name to URL. The weight file names are stored in model.json's weightsManifest.paths field. By default we consider weight files are colocated with the model.json file. 
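</p> <p>The default (colocated) behaviour is described below; if the weight files instead live somewhere else, such as the hypothetical CDN host in this sketch, the converter can rewrite each weight file name into a full URL:</p> <pre class="hljs"><code class="hljs language-js">// Sketch: load model.json from one host (hypothetical URLs) while
// redirecting each weight file request to a different host.
const model = await tf.loadLayersModel('https://example.com/models/model.json', {
  weightUrlConverter: async (weightFileName) =>
      `https://cdn.example.com/weights/${weightFileName}`
});</code></pre> <p>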
For example: model.json URL: https://www.google.com/models/1/model.json group1-shard1of1.bin url: https://www.google.com/models/1/group1-shard1of1.bin</p> <p>With this func you can convert the weight file name to any URL.</span> </li> <li class="parameter config-param"> <span class="param-name">streamWeights</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to stream the model directly to the backend or cache all its weights on CPU first. Useful for large models.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:LayersModel">tf.LayersModel</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.browserDownloads" href="#io.browserDownloads"> tf.io.browserDownloads</a> <span class="signature">(fileNamePrefix?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/browser_files.ts#L299-L301" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates an IOHandler that triggers file downloads from the browser.</p> <p>The returned <code>IOHandler</code> instance can be used as model exporting methods such as <code>tf.Model.save</code> and supports only saving.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>( {<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>], <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">const</span> saveResult = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;downloads://mymodel&#x27;</span>); <span class="hljs-comment">// This will trigger downloading of two files:</span> <span class="hljs-comment">// &#x27;mymodel.json&#x27; and &#x27;mymodel.weights.bin&#x27;.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(saveResult); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">fileNamePrefix</span> <span class="param-type">(string)</span> <span class="param-docs">Prefix name of the files to be downloaded. For use with <code>tf.Model</code>, <code>fileNamePrefix</code> should follow either of the following two formats:</p> <ol> <li><code>null</code> or <code>undefined</code>, in which case the default file names will be used:</li> </ol> <ul> <li>'model.json' for the JSON file containing the model topology and weights manifest.</li> <li>'model.weights.bin' for the binary file containing the binary weight values.</li> </ul> <ol start="2"> <li>A single string or an Array of a single string, as the file name prefix. 
For example, if <code>'foo'</code> is provided, the downloaded JSON file and binary weights file will be named 'foo.json' and 'foo.weights.bin', respectively.</li> </ol> </span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">IOHandler</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.browserFiles" href="#io.browserFiles"> tf.io.browserFiles</a> <span class="signature">(files)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/browser_files.ts#L343-L345" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates an IOHandler that loads model artifacts from user-selected files.</p> <p>This method can be used for loading from files such as user-selected files in the browser. When used in conjunction with <a href="#loadLayersModel">tf.loadLayersModel()</a>, an instance of <a href="#class:LayersModel">tf.LayersModel</a> (Keras-style) can be constructed from the loaded artifacts.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Note: This code snippet won&#x27;t run properly without the actual file input</span> <span class="hljs-comment">// elements in the HTML DOM.</span> <span class="hljs-comment">// Suppose there are two HTML file input (`&lt;input type=&quot;file&quot; ...&gt;`)</span> <span class="hljs-comment">// elements.</span> <span class="hljs-keyword">const</span> uploadJSONInput = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">getElementById</span>(<span class="hljs-string">&#x27;upload-json&#x27;</span>); <span class="hljs-keyword">const</span> uploadWeightsInput = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">getElementById</span>(<span class="hljs-string">&#x27;upload-weights&#x27;</span>); <span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>(tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">browserFiles</span>( [uploadJSONInput.<span class="hljs-property">files</span>[<span class="hljs-number">0</span>], uploadWeightsInput.<span class="hljs-property">files</span>[<span class="hljs-number">0</span>]])); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">files</span> <span class="param-type">(File[])</span> <span class="param-docs"><code>File</code>s to load from. Currently, this function supports only loading from files that contain Keras-style models (i.e., <code>tf.Model</code>s), for which an <code>Array</code> of <code>File</code>s is expected (in that order):</p> <ul> <li>A JSON file containing the model topology and weight manifest.</li> <li>Optionally, one or more binary files containing the binary weights. These files must have names that match the paths in the <code>weightsManifest</code> contained by the aforementioned JSON file, or errors will be thrown during loading. These weights files have the same format as the ones generated by <code>tensorflowjs_converter</code> that comes with the <code>tensorflowjs</code> Python PIP package. 
If no weights files are provided, only the model topology will be loaded from the JSON file above.</li> </ul> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">IOHandler</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.http" href="#io.http"> tf.io.http</a> <span class="signature">(path, loadOptions?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/http.ts#L362-L364" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates an IOHandler subtype that sends model artifacts to HTTP server.</p> <p>An HTTP request of the <code>multipart/form-data</code> mime type will be sent to the <code>path</code> URL. The form data includes artifacts that represent the topology and/or weights of the model. In the case of Keras-style <code>tf.Model</code>, two blobs (files) exist in form-data:</p> <ul> <li>A JSON file consisting of <code>modelTopology</code> and <code>weightsManifest</code>.</li> <li>A binary weights file consisting of the concatenated weight values. These files are in the same format as the one generated by <a href="https://js.tensorflow.org/tutorials/import-keras.html">tfjs_converter</a>.</li> </ul> <p>The following code snippet exemplifies the client-side code that uses this function:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>( tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">100</span>], <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">const</span> saveResult = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">http</span>( <span class="hljs-string">&#x27;http://model-server:5000/upload&#x27;</span>, {<span class="hljs-attr">requestInit</span>: {<span class="hljs-attr">method</span>: <span class="hljs-string">&#x27;PUT&#x27;</span>}})); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(saveResult); </code></pre> <p>If the default <code>POST</code> method is to be used, without any custom parameters such as headers, you can simply pass an HTTP or HTTPS URL to <code>model.save</code>:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> saveResult = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;http://model-server:5000/upload&#x27;</span>); </code></pre> <p>The following GitHub Gist https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864 implements a server based on <a target="_blank" rel="noopener" href="https://github.com/pallets/flask">flask</a> that can receive the request. 
Upon receiving the model artifacts via the request, this particular server reconstitutes instances of <a target="_blank" rel="noopener" href="https://keras.io/models/model/">Keras Models</a> in memory.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">path</span> <span class="param-type">(string)</span> <span class="param-docs">A URL path to the model. Can be an absolute HTTP path (e.g., 'http://localhost:8000/model-upload') or a relative path (e.g., './model-upload').</span> </li> <li class="parameter"> <span class="param-name">loadOptions</span> <span class="param-type">(LoadOptions)</span> <span class="param-docs">Optional configuration for loading. It includes the following fields:</p> <ul> <li>weightPathPrefix Optional, this specifies the path prefix for weight files, by default this is calculated from the path param.</li> <li>fetchFunc Optional, custom <code>fetch</code> function. E.g., in Node.js, the <code>fetch</code> from node-fetch can be used here.</li> <li>onProgress Optional, progress callback function, fired periodically before the load is completed.</li> </ul> </span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">IOHandler</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="loadGraphModelSync" href="#loadGraphModelSync"> tf.loadGraphModelSync</a> <span class="signature">(modelSource)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L654-L701" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Load a graph model given a synchronous IO handler with a 'load' method.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">modelSource</span> <span class="param-type">(io.IOHandlerSync| io.ModelArtifacts|[io.ModelJSON, /* Weights */ ArrayBuffer])</span> <span class="param-docs">The <code>io.IOHandlerSync</code> that loads the model, or the <code>io.ModelArtifacts</code> that encode the model, or a tuple of <code>[io.ModelJSON, ArrayBuffer]</code> of which the first element encodes the model and the second contains the weights.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:GraphModel">tf.GraphModel</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Management" href="#Models-Management" class="symbol-link"> Models / Management </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.copyModel" href="#io.copyModel"> tf.io.copyModel</a> <span class="signature">(sourceURL, destURL)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/model_management.ts#L298-L302" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Copy a model from one URL to another.</p> <p>This function supports:</p> <ol> <li>Copying within a storage medium, e.g., <code>tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')</code></li> <li>Copying between two storage mediums, e.g., 
<code>tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')</code></li> </ol> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// First create and save a model.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>( {<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>], <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// Then list existing models.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Copy the model, from Local Storage to IndexedDB.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">copyModel</span>( <span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>, <span class="hljs-string">&#x27;indexeddb://demo/management/model1&#x27;</span>); <span class="hljs-comment">// List models again.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Remove both models.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">removeModel</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">removeModel</span>(<span class="hljs-string">&#x27;indexeddb://demo/management/model1&#x27;</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sourceURL</span> <span class="param-type">(string)</span> <span class="param-docs">Source URL of copying.</span> </li> <li class="parameter"> <span class="param-name">destURL</span> <span class="param-type">(string)</span> <span class="param-docs">Destination URL of copying.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;ModelArtifactsInfo&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.listModels" href="#io.listModels"> tf.io.listModels</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/model_management.ts#L198-L210" target=_blank>Source</a> </span> </div> <div 
class="documentation"><p>List all models stored in registered storage mediums.</p> <p>For a web browser environment, the registered mediums are Local Storage and IndexedDB.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// First create and save a model.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>( {<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>], <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// Then list existing models.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Delete the model.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">removeModel</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// List models again.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); </code></pre> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;{[url: string]: ModelArtifactsInfo}&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.moveModel" href="#io.moveModel"> tf.io.moveModel</a> <span class="signature">(sourceURL, destURL)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/model_management.ts#L350-L354" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Move a model from one URL to another.</p> <p>This function supports:</p> <ol> <li>Moving within a storage medium, e.g., <code>tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')</code></li> <li>Moving between two storage mediums, e.g., <code>tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')</code></li> </ol> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// First create and save a model.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>( {<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>], <span 
class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// Then list existing models.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Move the model, from Local Storage to IndexedDB.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">moveModel</span>( <span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>, <span class="hljs-string">&#x27;indexeddb://demo/management/model1&#x27;</span>); <span class="hljs-comment">// List models again.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Remove the moved model.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">removeModel</span>(<span class="hljs-string">&#x27;indexeddb://demo/management/model1&#x27;</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sourceURL</span> <span class="param-type">(string)</span> <span class="param-docs">Source URL of moving.</span> </li> <li class="parameter"> <span class="param-name">destURL</span> <span class="param-type">(string)</span> <span class="param-docs">Destination URL of moving.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;ModelArtifactsInfo&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="io.removeModel" href="#io.removeModel"> tf.io.removeModel</a> <span class="signature">(url)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/io/model_management.ts#L245-L249" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Remove a model specified by URL from a registered storage medium.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// First create and save a model.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>( {<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>], <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span 
class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// Then list existing models.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); <span class="hljs-comment">// Delete the model.</span> <span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">removeModel</span>(<span class="hljs-string">&#x27;localstorage://demo/management/model1&#x27;</span>); <span class="hljs-comment">// List models again.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(<span class="hljs-keyword">await</span> tf.<span class="hljs-property">io</span>.<span class="hljs-title function_">listModels</span>())); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">url</span> <span class="param-type">(string)</span> <span class="param-docs">A URL to a stored model, with a scheme prefix, e.g., 'localstorage://my-model-1', 'indexeddb://my/model/2'.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;ModelArtifactsInfo&gt;</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Serialization" href="#Models-Serialization" class="symbol-link"> Models / Serialization </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="registerClass" href="#registerClass"> tf.registerClass</a> <span class="signature">(cls, pkg?, name?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/serialization.ts#L234-L265" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Register a class with the serialization map of TensorFlow.js.</p> <p>This is often used for registering custom Layers, so they can be serialized and deserialized.</p> <p>Example 1. 
Register the class without package name and specified name.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCustomLayer</span> <span class="hljs-keyword">extends</span> <span class="hljs-title class_ inherited__">tf.layers.Layer</span> { <span class="hljs-keyword">static</span> className = <span class="hljs-string">&#x27;MyCustomLayer&#x27;</span>; <span class="hljs-title function_">constructor</span>(<span class="hljs-params">config</span>) { <span class="hljs-variable language_">super</span>(config); } } tf.<span class="hljs-property">serialization</span>.<span class="hljs-title function_">registerClass</span>(<span class="hljs-title class_">MyCustomLayer</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMOBJECT</span>.<span class="hljs-title function_">get</span>(<span class="hljs-string">&quot;Custom&gt;MyCustomLayer&quot;</span>)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMNAMES</span>.<span class="hljs-title function_">get</span>(<span class="hljs-title class_">MyCustomLayer</span>)); </code></pre> <p>Example 2. Register the class with package name: &quot;Package&quot; and specified name: &quot;MyLayer&quot;.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCustomLayer</span> <span class="hljs-keyword">extends</span> <span class="hljs-title class_ inherited__">tf.layers.Layer</span> { <span class="hljs-keyword">static</span> className = <span class="hljs-string">&#x27;MyCustomLayer&#x27;</span>; <span class="hljs-title function_">constructor</span>(<span class="hljs-params">config</span>) { <span class="hljs-variable language_">super</span>(config); } } tf.<span class="hljs-property">serialization</span>.<span class="hljs-title function_">registerClass</span>(<span class="hljs-title class_">MyCustomLayer</span>, <span class="hljs-string">&quot;Package&quot;</span>, <span class="hljs-string">&quot;MyLayer&quot;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMOBJECT</span>.<span class="hljs-title function_">get</span>(<span class="hljs-string">&quot;Package&gt;MyLayer&quot;</span>)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMNAMES</span>.<span class="hljs-title function_">get</span>(<span class="hljs-title class_">MyCustomLayer</span>)); </code></pre> <p>Example 3. 
Register the class with specified name: &quot;MyLayer&quot;.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCustomLayer</span> <span class="hljs-keyword">extends</span> <span class="hljs-title class_ inherited__">tf.layers.Layer</span> { <span class="hljs-keyword">static</span> className = <span class="hljs-string">&#x27;MyCustomLayer&#x27;</span>; <span class="hljs-title function_">constructor</span>(<span class="hljs-params">config</span>) { <span class="hljs-variable language_">super</span>(config); } } tf.<span class="hljs-property">serialization</span>.<span class="hljs-title function_">registerClass</span>(<span class="hljs-title class_">MyCustomLayer</span>, <span class="hljs-literal">undefined</span>, <span class="hljs-string">&quot;MyLayer&quot;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMOBJECT</span>.<span class="hljs-title function_">get</span>(<span class="hljs-string">&quot;Custom&gt;MyLayer&quot;</span>)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMNAMES</span>.<span class="hljs-title function_">get</span>(<span class="hljs-title class_">MyCustomLayer</span>)); </code></pre> <p>Example 4. Register the class with specified package name: &quot;Package&quot;.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCustomLayer</span> <span class="hljs-keyword">extends</span> <span class="hljs-title class_ inherited__">tf.layers.Layer</span> { <span class="hljs-keyword">static</span> className = <span class="hljs-string">&#x27;MyCustomLayer&#x27;</span>; <span class="hljs-title function_">constructor</span>(<span class="hljs-params">config</span>) { <span class="hljs-variable language_">super</span>(config); } } tf.<span class="hljs-property">serialization</span>.<span class="hljs-title function_">registerClass</span>(<span class="hljs-title class_">MyCustomLayer</span>, <span class="hljs-string">&quot;Package&quot;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMOBJECT</span> .<span class="hljs-title function_">get</span>(<span class="hljs-string">&quot;Package&gt;MyCustomLayer&quot;</span>)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">serialization</span>.<span class="hljs-property">GLOBALCUSTOMNAMES</span> .<span class="hljs-title function_">get</span>(<span class="hljs-title class_">MyCustomLayer</span>)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">cls</span> <span class="param-type">(SerializableConstructor)</span> <span class="param-docs">The class to be registered. It must have a public static member called <code>className</code> defined and the value must be a non-empty string.</span> </li> <li class="parameter"> <span class="param-name">pkg</span> <span class="param-type">(string)</span> <span class="param-docs">The package name that this class belongs to. 
This is used to define the key in GlobalCustomObject. If not defined, it defaults to <code>Custom</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">The name specified by the user. It defaults to the actual name of the class as specified by its static <code>className</code> property.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SerializableConstructor</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Classes" href="#Models-Classes" class="symbol-link"> Models / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:Functional" href="#class:Functional">tf.Functional</a> <span class="signature"> <span>extends <a href="#class:LayersModel">tf.LayersModel</a></span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L2205-L2207" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A <a href="#class:Functional">tf.Functional</a> is an alias to <a href="#class:LayersModel">tf.LayersModel</a>.</p> <p>See also: <a href="#class:LayersModel">tf.LayersModel</a>, <a href="#class:Sequential">tf.Sequential</a>, <a href="#loadLayersModel">tf.loadLayersModel()</a>.</p> </div> <div class="method-list"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:GraphModel" href="#class:GraphModel">tf.GraphModel</a> <span class="signature"> <span>extends InferenceModel</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L44-L591" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A <a href="#class:GraphModel">tf.GraphModel</a> is a directed, acyclic graph built from a SavedModel GraphDef and allows inference execution.</p> <p>A <a href="#class:GraphModel">tf.GraphModel</a> can only be created by loading from a model converted from a <a target="_blank" rel="noopener" href="https://www.tensorflow.org/guide/saved_model">TensorFlow SavedModel</a> using the command line converter tool and loaded via <a href="#loadGraphModel">tf.loadGraphModel()</a>.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.loadSync" href="#tf.GraphModel.loadSync"> loadSync</a> <span class="signature">(artifacts)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L176-L181" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Synchronously construct the in-memory weight map and compile the inference graph.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">artifacts</span> <span class="param-type">(io.ModelArtifacts)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">boolean</span> </div> </div> <div class="symbol function method"> <div 
class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.save" href="#tf.GraphModel.save"> save</a> <span class="signature">(handlerOrURL, config?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L280-L301" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Save the configuration and/or weights of the GraphModel.</p> <p>An <code>IOHandler</code> is an object that has a <code>save</code> method of the proper signature defined. The <code>save</code> method manages the storing or transmission of serialized data (&quot;artifacts&quot;) that represent the model's topology and weights onto or via a specific medium, such as file downloads, local storage, IndexedDB in the web browser and HTTP requests to a server. TensorFlow.js provides <code>IOHandler</code> implementations for a number of frequently used saving mediums, such as <a href="#io.browserDownloads">tf.io.browserDownloads()</a> and <code>tf.io.browserLocalStorage</code>. See <code>tf.io</code> for more details.</p> <p>This method also allows you to refer to certain types of <code>IOHandler</code>s as URL-like string shortcuts, such as 'localstorage://' and 'indexeddb://'.</p> <p>Example 1: Save <code>model</code>'s topology and weights to browser <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage">local storage</a>; then load it back.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> modelUrl = <span class="hljs-string">&#x27;https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json&#x27;</span>; <span class="hljs-keyword">const</span> model = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadGraphModel</span>(modelUrl); <span class="hljs-keyword">const</span> zeros = tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">1</span>, <span class="hljs-number">224</span>, <span class="hljs-number">224</span>, <span class="hljs-number">3</span>]); model.<span class="hljs-title function_">predict</span>(zeros).<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-keyword">const</span> loadedModel = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadGraphModel</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from loaded model:&#x27;</span>); model.<span class="hljs-title function_">predict</span>(zeros).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">handlerOrURL</span> <span class="param-type">(io.IOHandler|string)</span> <span class="param-docs">An instance of <code>IOHandler</code> or a URL-like, scheme-based string shortcut for <code>IOHandler</code>.</span> </li> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs">Options for saving the model.</span> <span 
class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">trainableOnly</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to save only the trainable weights of the model, ignoring the non-trainable ones.</span> </li> <li class="parameter config-param"> <span class="param-name">includeOptimizer</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the optimizer will be saved (if exists).</p> <p>Default: <code>false</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;io.SaveResult&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.predict" href="#tf.GraphModel.predict"> predict</a> <span class="signature">(inputs, config?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L357-L361" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Execute the inference for the input tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs">Prediction configuration for specifying the batch size. Currently the batch size option is ignored for graph model.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Optional. Batch size (Integer). If unspecified, it will default to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. Verbosity mode. 
Defaults to false.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.predictAsync" href="#tf.GraphModel.predictAsync"> predictAsync</a> <span class="signature">(inputs, config?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L403-L408" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Execute the inference for the input tensors in async fashion, use this method when your model contains control flow ops.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs">Prediction configuration for specifying the batch size. Currently the batch size option is ignored for graph model.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Optional. Batch size (Integer). If unspecified, it will default to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. Verbosity mode. Defaults to false.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.execute" href="#tf.GraphModel.execute"> execute</a> <span class="signature">(inputs, outputs?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L507-L516" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes inference for the model for given input tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs">tensor, tensor array or tensor map of the inputs for the model, keyed by the input node names.</span> </li> <li class="parameter"> <span class="param-name">outputs</span> <span class="param-type">(string|string[])</span> <span class="param-docs">output node name from the TensorFlow model, if no outputs are specified, the default outputs of the model would be used. 
You can inspect intermediate nodes of the model by adding them to the outputs array.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.executeAsync" href="#tf.GraphModel.executeAsync"> executeAsync</a> <span class="signature">(inputs, outputs?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L534-L545" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes inference for the model for given input tensors in async fashion, use this method when your model contains control flow ops.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[name: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs">tensor, tensor array or tensor map of the inputs for the model, keyed by the input node names.</span> </li> <li class="parameter"> <span class="param-name">outputs</span> <span class="param-type">(string|string[])</span> <span class="param-docs">output node name from the TensorFlow model, if no outputs are specified, the default outputs of the model would be used. You can inspect intermediate nodes of the model by adding them to the outputs array.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.getIntermediateTensors" href="#tf.GraphModel.getIntermediateTensors"> getIntermediateTensors</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L553-L555" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Get intermediate tensors for model debugging mode (flag KEEP_INTERMEDIATE_TENSORS is true).</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">NamedTensorsMap</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.GraphModel.disposeIntermediateTensors" href="#tf.GraphModel.disposeIntermediateTensors"> disposeIntermediateTensors</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L563-L565" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Dispose intermediate tensors for model debugging mode (flag KEEP_INTERMEDIATE_TENSORS is true).</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" 
name="tf.GraphModel.dispose" href="#tf.GraphModel.dispose"> dispose</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/executor/graph_model.ts#L579-L590" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Releases the memory used by the weight tensors and resourceManager.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:LayersModel" href="#class:LayersModel">tf.LayersModel</a> <span class="signature"> <span>extends Container|tfc.InferenceModel</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L475-L2195" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A <a href="#class:LayersModel">tf.LayersModel</a> is a directed, acyclic graph of <code>tf.Layer</code>s plus methods for training, evaluation, prediction and saving.</p> <p><a href="#class:LayersModel">tf.LayersModel</a> is the basic unit of training, inference and evaluation in TensorFlow.js. To create a <a href="#class:LayersModel">tf.LayersModel</a>, use <a href="#class:LayersModel">tf.LayersModel</a>.</p> <p>See also: <a href="#class:Sequential">tf.Sequential</a>, <a href="#loadLayersModel">tf.loadLayersModel()</a>.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.summary" href="#tf.LayersModel.summary"> summary</a> <span class="signature">(lineLength?, positions?, printFn?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L559-L571" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Print a text summary of the model's layers.</p> <p>The summary includes</p> <ul> <li>Name and type of all layers that comprise the model.</li> <li>Output shape(s) of the layers</li> <li>Number of weight parameters of each layer</li> <li>If the model has non-sequential-like topology, the inputs each layer receives</li> <li>The total number of trainable and non-trainable parameters of the model.</li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> dense1 = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>}).<span class="hljs-title function_">apply</span>(input1); <span class="hljs-keyword">const</span> dense2 = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>}).<span class="hljs-title function_">apply</span>(input2); <span class="hljs-keyword">const</span> concat = tf.<span class="hljs-property">layers</span>.<span class="hljs-title 
function_">concatenate</span>().<span class="hljs-title function_">apply</span>([dense1, dense2]); <span class="hljs-keyword">const</span> output = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">3</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;softmax&#x27;</span>}).<span class="hljs-title function_">apply</span>(concat); <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">model</span>({<span class="hljs-attr">inputs</span>: [input1, input2], <span class="hljs-attr">outputs</span>: output}); model.<span class="hljs-title function_">summary</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">lineLength</span> <span class="param-type">(number)</span> <span class="param-docs">Custom line length, in number of characters.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">positions</span> <span class="param-type">(number[])</span> <span class="param-docs">Custom widths of each of the columns, as either fractions of <code>lineLength</code> (e.g., <code>[0.5, 0.75, 1]</code>) or absolute number of characters (e.g., <code>[30, 50, 65]</code>). Each number corresponds to right-most (i.e., ending) position of a column.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">printFn</span> <span class="param-type">((message?: <a href="#any">tf.any()</a>, ...optionalParams: <a href="#any">tf.any()</a>[]) =&gt; void)</span> <span class="param-docs">Custom print function. Can be used to replace the default <code>console.log</code>. For example, you can use <code>x =&gt; {}</code> to mute the printed messages in the console.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.compile" href="#tf.LayersModel.compile"> compile</a> <span class="signature">(args)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L583-L785" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Configures and prepares the model for training and evaluation. Compiling outfits the model with an optimizer, loss, and/or metrics. 
Calling <code>fit</code> or <code>evaluate</code> on an un-compiled model will throw an error.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>ModelCompileArgs</code> specifying the loss, optimizer, and metrics to be used for fitting and evaluating this model.</span> </li> <li class="parameter config-param"> <span class="param-name">optimizer</span> <span class="param-type">(string|<a href="#class:train.Optimizer">tf.train.Optimizer</a>)</span> <span class="param-docs">An instance of <a href="#class:train.Optimizer">tf.train.Optimizer</a> or a string name for an Optimizer.</span> </li> <li class="parameter config-param"> <span class="param-name">loss</span> <span class="param-type">(string|string[]|{[outputName: string]: string}|LossOrMetricFn| LossOrMetricFn[]|{[outputName: string]: LossOrMetricFn})</span> <span class="param-docs">Objective function(s) or name(s) of objective function(s). If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or an Array of losses. The loss value that will be minimized by the model will then be the sum of all individual losses.</span> </li> <li class="parameter config-param"> <span class="param-name">metrics</span> <span class="param-type">(string|LossOrMetricFn|Array| {[outputName: string]: string | LossOrMetricFn})</span> <span class="param-docs">List of metrics to be evaluated by the model during training and testing. Typically you will use <code>metrics=['accuracy']</code>. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.evaluate" href="#tf.LayersModel.evaluate"> evaluate</a> <span class="signature">(x, y, args?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L840-L864" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the loss value &amp; metrics values for the model in test mode.</p> <p>Loss and metrics are specified during <code>compile()</code>, which needs to happen before calls to <code>evaluate()</code>.</p> <p>Computation is done in batches.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>({ <span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>]})] }); model.<span class="hljs-title function_">compile</span>({<span class="hljs-attr">optimizer</span>: <span class="hljs-string">&#x27;sgd&#x27;</span>, <span class="hljs-attr">loss</span>: <span class="hljs-string">&#x27;meanSquaredError&#x27;</span>}); <span class="hljs-keyword">const</span> result = model.evaluate( tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">8</span>, <span class="hljs-number">10</span>]), tf.<span class="hljs-title function_">ones</span>([<span 
class="hljs-number">8</span>, <span class="hljs-number">1</span>]), {<span class="hljs-attr">batchSize</span>: <span class="hljs-number">4</span>}); result.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs"><a href="#class:Tensor">tf.Tensor</a> of test data, or an <code>Array</code> of <a href="#class:Tensor">tf.Tensor</a>s if the model has multiple inputs.</span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs"><a href="#class:Tensor">tf.Tensor</a> of target data, or an <code>Array</code> of <a href="#class:Tensor">tf.Tensor</a>s if the model has multiple outputs.</span> </li> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>ModelEvaluateArgs</code>, containing optional fields.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Batch size (Integer). If unspecified, it will default to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(ModelLoggingVerbosity)</span> <span class="param-docs">Verbosity mode.</span> </li> <li class="parameter config-param"> <span class="param-name">sampleWeight</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Tensor of weights to weight the contribution of different samples to the loss and metrics.</span> </li> <li class="parameter config-param"> <span class="param-name">steps</span> <span class="param-type">(number)</span> <span class="param-docs">integer: total number of steps (batches of samples) before declaring the evaluation round finished. Ignored with the default value of <code>undefined</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Scalar</a>|<a href="#class:Tensor">tf.Scalar</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.evaluateDataset" href="#tf.LayersModel.evaluateDataset"> evaluateDataset</a> <span class="signature">(dataset, args?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L888-L892" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Evaluate model using a dataset object.</p> <p>Note: Unlike <code>evaluate()</code>, this method is asynchronous (<code>async</code>).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">dataset</span> <span class="param-type">(<a href="#class:data.Dataset">tf.data.Dataset</a>)</span> <span class="param-docs">A dataset object. Its <code>iterator()</code> method is expected to generate a dataset iterator object, the <code>next()</code> method of which is expected to produce data batches for evaluation. 
The return value of the <code>next()</code> call ought to contain a boolean <code>done</code> field and a <code>value</code> field. The <code>value</code> field is expected to be an array of two <a href="#class:Tensor">tf.Tensor</a>s or an array of two nested <a href="#class:Tensor">tf.Tensor</a> structures. The former case is for models with exactly one input and one output (e.g. a sequential model). The latter case is for models with multiple inputs and/or multiple outputs. Of the two items in the array, the first is the input feature(s) and the second is the output target(s).</span> </li> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A configuration object for the dataset-based evaluation.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batches</span> <span class="param-type">(number)</span> <span class="param-docs">Number of batches to draw from the dataset object before ending the evaluation.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(ModelLoggingVerbosity)</span> <span class="param-docs">Verbosity mode.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Scalar</a>|<a href="#class:Tensor">tf.Scalar</a>[]&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.predict" href="#tf.LayersModel.predict"> predict</a> <span class="signature">(x, args?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L1105-L1120" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Generates output predictions for the input samples.</p> <p>Computation is done in batches.</p> <p>Note: the &quot;step&quot; mode of predict() is currently not supported. 
This is because the TensorFlow.js core backend is imperative only.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>({ <span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>]})] }); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">8</span>, <span class="hljs-number">10</span>]), {<span class="hljs-attr">batchSize</span>: <span class="hljs-number">4</span>}).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">The input data, as a Tensor, or an <code>Array</code> of <a href="#class:Tensor">tf.Tensor</a>s if the model has multiple inputs.</span> </li> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>ModelPredictArgs</code> object containing optional fields.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Optional. Batch size (Integer). If unspecified, it will default to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. Verbosity mode. 
Defaults to false.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.predictOnBatch" href="#tf.LayersModel.predictOnBatch"> predictOnBatch</a> <span class="signature">(x)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L1137-L1143" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns predictions for a single batch of samples.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>({ <span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>]})] }); model.<span class="hljs-title function_">predictOnBatch</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">8</span>, <span class="hljs-number">10</span>])).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">: Input samples, as a Tensor (for models with exactly one input) or an array of Tensors (for models with more than one input).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.fit" href="#tf.LayersModel.fit"> fit</a> <span class="signature">(x, y, args?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L1464-L1602" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Trains the model for a fixed number of epochs (iterations on a dataset).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>({ <span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>]})] }); model.<span class="hljs-title function_">compile</span>({<span class="hljs-attr">optimizer</span>: <span class="hljs-string">&#x27;sgd&#x27;</span>, <span class="hljs-attr">loss</span>: <span class="hljs-string">&#x27;meanSquaredError&#x27;</span>}); <span class="hljs-keyword">for</span> (<span class="hljs-keyword">let</span> i = <span class="hljs-number">1</span>; i &lt; <span class="hljs-number">5</span> ; ++i) { <span class="hljs-keyword">const</span> h = <span class="hljs-keyword">await</span> model.<span 
class="hljs-title function_">fit</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">8</span>, <span class="hljs-number">10</span>]), tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">8</span>, <span class="hljs-number">1</span>]), { <span class="hljs-attr">batchSize</span>: <span class="hljs-number">4</span>, <span class="hljs-attr">epochs</span>: <span class="hljs-number">3</span> }); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&quot;Loss after Epoch &quot;</span> + i + <span class="hljs-string">&quot; : &quot;</span> + h.<span class="hljs-property">history</span>.<span class="hljs-property">loss</span>[<span class="hljs-number">0</span>]); } </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[inputName: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs"><a href="#class:Tensor">tf.Tensor</a> of training data, or an array of <a href="#class:Tensor">tf.Tensor</a>s if the model has multiple inputs. If all inputs in the model are named, you can also pass a dictionary mapping input names to <a href="#class:Tensor">tf.Tensor</a>s.</span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[inputName: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs"><a href="#class:Tensor">tf.Tensor</a> of target (label) data, or an array of <a href="#class:Tensor">tf.Tensor</a>s if the model has multiple outputs. If all outputs in the model are named, you can also pass a dictionary mapping output names to <a href="#class:Tensor">tf.Tensor</a>s.</span> </li> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>ModelFitArgs</code>, containing optional fields.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Number of samples per gradient update. If unspecified, it will default to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">epochs</span> <span class="param-type">(number)</span> <span class="param-docs">Integer number of times to iterate over the training data arrays.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(ModelLoggingVerbosity | 2)</span> <span class="param-docs">Verbosity level.</p> <p>Expected to be 0, 1, or 2. Default: 1.</p> <p>0 - No printed message during fit() call. 1 - In Node.js (tfjs-node), prints the progress bar, together with real-time updates of loss and metric values and training speed. In the browser: no action. This is the default. 2 - Not implemented yet.</span> </li> <li class="parameter config-param"> <span class="param-name">callbacks</span> <span class="param-type">(BaseCallback[]|CustomCallbackArgs|CustomCallbackArgs[])</span> <span class="param-docs">List of callbacks to be called during training. 
Can have one or more of the following callbacks:</p> <ul> <li><code>onTrainBegin(logs)</code>: called when training starts.</li> <li><code>onTrainEnd(logs)</code>: called when training ends.</li> <li><code>onEpochBegin(epoch, logs)</code>: called at the start of every epoch.</li> <li><code>onEpochEnd(epoch, logs)</code>: called at the end of every epoch.</li> <li><code>onBatchBegin(batch, logs)</code>: called at the start of every batch.</li> <li><code>onBatchEnd(batch, logs)</code>: called at the end of every batch.</li> <li><code>onYield(epoch, batch, logs)</code>: called every <code>yieldEvery</code> milliseconds with the current epoch, batch and logs. The logs are the same as in <code>onBatchEnd()</code>. Note that <code>onYield</code> can skip batches or epochs. See also docs for <code>yieldEvery</code> below.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">validationSplit</span> <span class="param-type">(number)</span> <span class="param-docs">Float between 0 and 1: fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the <code>x</code> and <code>y</code> data provided, before shuffling.</span> </li> <li class="parameter config-param"> <span class="param-name">validationData</span> <span class="param-type">([ <a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[], <a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[] ]|[<a href="#class:Tensor">tf.Tensor</a> | <a href="#class:Tensor">tf.Tensor</a>[], <a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[], <a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]])</span> <span class="param-docs">Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. This could be a tuple [xVal, yVal] or a tuple [xVal, yVal, valSampleWeights]. The model will not be trained on this data. <code>validationData</code> will override <code>validationSplit</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">shuffle</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to shuffle the training data before each epoch. Has no effect when <code>stepsPerEpoch</code> is not <code>null</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">classWeight</span> <span class="param-type">(ClassWeight|ClassWeight[]|ClassWeightMap)</span> <span class="param-docs">Optional object mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to &quot;pay more attention&quot; to samples from an under-represented class.</p> <p>If the model has multiple outputs, a class weight can be specified for each of the outputs by setting this field an array of weight object or an object that maps model output names (e.g., <code>model.outputNames[0]</code>) to weight objects.</span> </li> <li class="parameter config-param"> <span class="param-name">sampleWeight</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Optional array of the same length as x, containing weights to apply to the model's loss for each sample. 
In the case of temporal data, you can pass a 2D array with shape (samples, sequenceLength), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sampleWeightMode=&quot;temporal&quot; in compile().</span> </li> <li class="parameter config-param"> <span class="param-name">initialEpoch</span> <span class="param-type">(number)</span> <span class="param-docs">Epoch at which to start training (useful for resuming a previous training run). When this is used, <code>epochs</code> is the index of the &quot;final epoch&quot;. The model is not trained for a number of iterations given by <code>epochs</code>, but merely until the epoch of index <code>epochs</code> is reached.</span> </li> <li class="parameter config-param"> <span class="param-name">stepsPerEpoch</span> <span class="param-type">(number)</span> <span class="param-docs">Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with Input Tensors such as TensorFlow data tensors, the default <code>null</code> is equal to the number of unique samples in your dataset divided by the batch size, or 1 if that cannot be determined.</span> </li> <li class="parameter config-param"> <span class="param-name">validationSteps</span> <span class="param-type">(number)</span> <span class="param-docs">Only relevant if <code>stepsPerEpoch</code> is specified. Total number of steps (batches of samples) to validate before stopping.</span> </li> <li class="parameter config-param"> <span class="param-name">yieldEvery</span> <span class="param-type">(YieldEveryOptions)</span> <span class="param-docs">Configures the frequency of yielding the main thread to other tasks.</p> <p>In the browser environment, yielding the main thread can improve the responsiveness of the page during training. In the Node.js environment, it can ensure tasks queued in the event loop can be handled in a timely manner.</p> <p>The value can be one of the following:</p> <ul> <li><code>'auto'</code>: The yielding happens at a certain frame rate (currently set at 125ms). This is the default.</li> <li><code>'batch'</code>: yield every batch.</li> <li><code>'epoch'</code>: yield every epoch.</li> <li>any <code>number</code>: yield every <code>number</code> milliseconds.</li> <li><code>'never'</code>: never yield. (yielding can still happen through <code>await nextFrame()</code> calls in custom callbacks.)</li> </ul> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;History&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.fitDataset" href="#tf.LayersModel.fitDataset"> fitDataset</a> <span class="signature">(dataset, args)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L1791-L1794" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Trains the model using a dataset object.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">dataset</span> <span class="param-type">(<a href="#class:data.Dataset">tf.data.Dataset</a>)</span> <span class="param-docs">A dataset object. 
Its <code>iterator()</code> method is expected to generate a dataset iterator object, the <code>next()</code> method of which is expected to produce data batches for training. The return value of the <code>next()</code> call ought to contain a boolean <code>done</code> field and a <code>value</code> field. The <code>value</code> field is expected to be an array of two <a href="#class:Tensor">tf.Tensor</a>s or an array of two nested <a href="#class:Tensor">tf.Tensor</a> structures. The former case is for models with exactly one input and one output (e.g. a sequential model). The latter case is for models with multiple inputs and/or multiple outputs. Of the two items in the array, the first is the input feature(s) and the second is the output target(s).</span> </li> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>ModelFitDatasetArgs</code>, containing optional fields.</span> </li> <li class="parameter config-param"> <span class="param-name">batchesPerEpoch</span> <span class="param-type">(number)</span> <span class="param-docs">(Optional) Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size, so that <code>fitDataset</code>() call can utilize the entire dataset. If it is not provided, use <code>done</code> return value in <code>iterator.next()</code> as signal to finish an epoch.</span> </li> <li class="parameter config-param"> <span class="param-name">epochs</span> <span class="param-type">(number)</span> <span class="param-docs">Integer number of times to iterate over the training dataset.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(ModelLoggingVerbosity)</span> <span class="param-docs">Verbosity level.</p> <p>Expected to be 0, 1, or 2. Default: 1.</p> <p>0 - No printed message during fit() call. 1 - In Node.js (tfjs-node), prints the progress bar, together with real-time updates of loss and metric values and training speed. In the browser: no action. This is the default. 2 - Not implemented yet.</span> </li> <li class="parameter config-param"> <span class="param-name">callbacks</span> <span class="param-type">(BaseCallback[]|CustomCallbackArgs|CustomCallbackArgs[])</span> <span class="param-docs">List of callbacks to be called during training. Can have one or more of the following callbacks:</p> <ul> <li><code>onTrainBegin(logs)</code>: called when training starts.</li> <li><code>onTrainEnd(logs)</code>: called when training ends.</li> <li><code>onEpochBegin(epoch, logs)</code>: called at the start of every epoch.</li> <li><code>onEpochEnd(epoch, logs)</code>: called at the end of every epoch.</li> <li><code>onBatchBegin(batch, logs)</code>: called at the start of every batch.</li> <li><code>onBatchEnd(batch, logs)</code>: called at the end of every batch.</li> <li><code>onYield(epoch, batch, logs)</code>: called every <code>yieldEvery</code> milliseconds with the current epoch, batch and logs. The logs are the same as in <code>onBatchEnd()</code>. Note that <code>onYield</code> can skip batches or epochs. 
See also docs for <code>yieldEvery</code> below.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">validationData</span> <span class="param-type">([ TensorOrArrayOrMap, TensorOrArrayOrMap ]|[TensorOrArrayOrMap, TensorOrArrayOrMap, TensorOrArrayOrMap]|<a href="#class:data.Dataset">tf.data.Dataset</a>)</span> <span class="param-docs">Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. This could be any of the following:</p> <ul> <li>An array <code>[xVal, yVal]</code>, where the two values may be <a href="#class:Tensor">tf.Tensor</a>, an array of Tensors, or a map of string to Tensor.</li> <li>Similarly, an array <code> [xVal, yVal, valSampleWeights]</code> (not implemented yet).</li> <li>a <code>Dataset</code> object with elements of the form <code>{xs: xVal, ys: yVal}</code>, where <code>xs</code> and <code>ys</code> are the feature and label tensors, respectively.</li> </ul> <p>If <code>validationData</code> is an Array of Tensor objects, each <a href="#class:Tensor">tf.Tensor</a> will be sliced into batches during validation, using the parameter <code>validationBatchSize</code> (which defaults to 32). The entirety of the <a href="#class:Tensor">tf.Tensor</a> objects will be used in the validation.</p> <p>If <code>validationData</code> is a dataset object, and the <code>validationBatches</code> parameter is specified, the validation will use <code>validationBatches</code> batches drawn from the dataset object. If <code>validationBatches</code> parameter is not specified, the validation will stop when the dataset is exhausted.</p> <p>The model will not be trained on this data.</span> </li> <li class="parameter config-param"> <span class="param-name">validationBatchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Optional batch size for validation.</p> <p>Used only if <code>validationData</code> is an array of <a href="#class:Tensor">tf.Tensor</a> objects, i.e., not a dataset object.</p> <p>If not specified, its value defaults to 32.</span> </li> <li class="parameter config-param"> <span class="param-name">validationBatches</span> <span class="param-type">(number)</span> <span class="param-docs">(Optional) Only relevant if <code>validationData</code> is specified and is a dataset object.</p> <p>Total number of batches of samples to draw from <code>validationData</code> for validation purpose before stopping at the end of every epoch. If not specified, <code>evaluateDataset</code> will use <code>iterator.next().done</code> as signal to stop validation.</span> </li> <li class="parameter config-param"> <span class="param-name">yieldEvery</span> <span class="param-type">(YieldEveryOptions)</span> <span class="param-docs">Configures the frequency of yielding the main thread to other tasks.</p> <p>In the browser environment, yielding the main thread can improve the responsiveness of the page during training. In the Node.js environment, it can ensure tasks queued in the event loop can be handled in a timely manner.</p> <p>The value can be one of the following:</p> <ul> <li><code>'auto'</code>: The yielding happens at a certain frame rate (currently set at 125ms). This is the default.</li> <li><code>'batch'</code>: yield every batch.</li> <li><code>'epoch'</code>: yield every epoch.</li> <li>a <code>number</code>: Will yield every <code>number</code> milliseconds.</li> <li><code>'never'</code>: never yield. 
(But yielding can still happen through <code>await nextFrame()</code> calls in custom callbacks.)</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">initialEpoch</span> <span class="param-type">(number)</span> <span class="param-docs">Epoch at which to start training (useful for resuming a previous training run). When this is used, <code>epochs</code> is the index of the &quot;final epoch&quot;. The model is not trained for a number of iterations given by <code>epochs</code>, but merely until the epoch of index <code>epochs</code> is reached.</span> </li> <li class="parameter config-param"> <span class="param-name">classWeight</span> <span class="param-type">(ClassWeight|ClassWeight[]|ClassWeightMap)</span> <span class="param-docs">Optional object mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to &quot;pay more attention&quot; to samples from an under-represented class.</p> <p>If the model has multiple outputs, a class weight can be specified for each of the outputs by setting this field to an array of weight objects or an object that maps model output names (e.g., <code>model.outputNames[0]</code>) to weight objects.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;History&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.trainOnBatch" href="#tf.LayersModel.trainOnBatch"> trainOnBatch</a> <span class="signature">(x, y)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L1819-L1839" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Runs a single gradient update on a single batch of data.</p> <p>This method differs from <code>fit()</code> and <code>fitDataset()</code> in the following regards:</p> <ul> <li>It operates on exactly one batch of data.</li> <li>It returns only the loss and metric values, instead of returning the batch-by-batch loss and metric values.</li> <li>It doesn't support fine-grained options such as verbosity and callbacks.</li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[inputName: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs">Input data. It could be one of the following:</p> <ul> <li>A <a href="#class:Tensor">tf.Tensor</a>, or an Array of <a href="#class:Tensor">tf.Tensor</a>s (in case the model has multiple inputs).</li> <li>An Object mapping input names to corresponding <a href="#class:Tensor">tf.Tensor</a> (if the model has named inputs).</li> </ul> </span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]| {[inputName: string]: <a href="#class:Tensor">tf.Tensor</a>})</span> <span class="param-docs">Target data. It could be either a <a href="#class:Tensor">tf.Tensor</a> or multiple <a href="#class:Tensor">tf.Tensor</a>s.
It should be consistent with <code>x</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;number|number[]&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.save" href="#tf.LayersModel.save"> save</a> <span class="signature">(handlerOrURL, config?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/training.ts#L2112-L2166" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Save the configuration and/or weights of the LayersModel.</p> <p>An <code>IOHandler</code> is an object that has a <code>save</code> method of the proper signature defined. The <code>save</code> method manages the storing or transmission of serialized data (&quot;artifacts&quot;) that represent the model's topology and weights onto or via a specific medium, such as file downloads, local storage, IndexedDB in the web browser and HTTP requests to a server. TensorFlow.js provides <code>IOHandler</code> implementations for a number of frequently used saving mediums, such as <a href="#io.browserDownloads">tf.io.browserDownloads()</a> and <code>tf.io.browserLocalStorage</code>. See <code>tf.io</code> for more details.</p> <p>This method also allows you to refer to certain types of <code>IOHandler</code>s as URL-like string shortcuts, such as 'localstorage://' and 'indexeddb://'.</p> <p>Example 1: Save <code>model</code>'s topology and weights to browser <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage">local storage</a>; then load it back.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from original model:&#x27;</span>); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-keyword">const</span> loadedModel = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>(<span class="hljs-string">&#x27;localstorage://my-model-1&#x27;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from loaded model:&#x27;</span>); loadedModel.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>Example 2. 
Saving <code>model</code>'s topology and weights to browser <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API">IndexedDB</a>; then load it back.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from original model:&#x27;</span>); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;indexeddb://my-model-1&#x27;</span>); <span class="hljs-keyword">const</span> loadedModel = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">loadLayersModel</span>(<span class="hljs-string">&#x27;indexeddb://my-model-1&#x27;</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Prediction from loaded model:&#x27;</span>); loadedModel.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); </code></pre> <p>Example 3. Saving <code>model</code>'s topology and weights as two files (<code>my-model-1.json</code> and <code>my-model-1.weights.bin</code>) downloaded from browser.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;downloads://my-model-1&#x27;</span>); </code></pre> <p>Example 4. Send <code>model</code>'s topology and weights to an HTTP server. 
See the documentation of <a href="#io.http">tf.io.http()</a> for more details including specifying request parameters and implementation of the server.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>( {<span class="hljs-attr">layers</span>: [tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})]}); <span class="hljs-keyword">const</span> saveResults = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">save</span>(<span class="hljs-string">&#x27;http://my-server/model/upload&#x27;</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">handlerOrURL</span> <span class="param-type">(io.IOHandler|string)</span> <span class="param-docs">An instance of <code>IOHandler</code> or a URL-like, scheme-based string shortcut for <code>IOHandler</code>.</span> </li> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs">Options for saving the model.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">trainableOnly</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to save only the trainable weights of the model, ignoring the non-trainable ones.</span> </li> <li class="parameter config-param"> <span class="param-name">includeOptimizer</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the optimizer will be saved (if it exists).</p> <p>Default: <code>false</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;io.SaveResult&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.LayersModel.getLayer" href="#tf.LayersModel.getLayer"> getLayer</a> <span class="signature">(name)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/container.ts#L1023-L1023" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Retrieves a layer based on either its name (unique) or index.</p> <p>Indices are based on order of horizontal graph traversal (bottom-up).</p> <p>If both <code>name</code> and <code>index</code> are specified, <code>index</code> takes precedence.</p>
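<p>A minimal sketch of looking a layer up by name (illustrative only; the layer name <code>'hidden'</code> below is an arbitrary example):</p> <pre class="hljs"><code class="hljs language-js">const model = tf.sequential();
model.add(tf.layers.dense({units: 4, inputShape: [8], name: 'hidden'}));
model.add(tf.layers.dense({units: 1}));

// Retrieve the first dense layer by its (unique) name.
const hidden = model.getLayer('hidden');
console.log(hidden.name, hidden.countParams());  // 'hidden' 36
</code></pre>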
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name of layer.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:layers.Layer">tf.layers.Layer</a></span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:Sequential" href="#class:Sequential">tf.Sequential</a> <span class="signature"> <span>extends <a href="#class:LayersModel">tf.LayersModel</a></span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/models.ts#L397-L1113" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A model with a stack of layers, feeding linearly from one to the next.</p> <p><a 
href="#sequential">tf.sequential()</a> is a factory function that creates an instance of <a href="#class:Sequential">tf.Sequential</a>.</p> <pre class="hljs"><code class="hljs language-js"> <span class="hljs-comment">// Define a model for linear regression.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">1</span>]})); <span class="hljs-comment">// Prepare the model for training: Specify the loss and the optimizer.</span> model.<span class="hljs-title function_">compile</span>({<span class="hljs-attr">loss</span>: <span class="hljs-string">&#x27;meanSquaredError&#x27;</span>, <span class="hljs-attr">optimizer</span>: <span class="hljs-string">&#x27;sgd&#x27;</span>}); <span class="hljs-comment">// Generate some synthetic data for training.</span> <span class="hljs-keyword">const</span> xs = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> ys = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">5</span>, <span class="hljs-number">7</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">1</span>]); <span class="hljs-comment">// Train the model using the data then do inference on a data point the</span> <span class="hljs-comment">// model hasn&#x27;t seen:</span> <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">fit</span>(xs, ys); model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">5</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>])).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.Sequential.add" href="#tf.Sequential.add"> add</a> <span class="signature">(layer)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/models.ts#L452-L555" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Adds a layer instance on top of the layer stack.</p> <pre class="hljs"><code class="hljs language-js"> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">1</span>]})); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>, <span class="hljs-attr">activation</span>: <span 
class="hljs-string">&#x27;relu6&#x27;</span>})); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;relu6&#x27;</span>})); <span class="hljs-comment">// Note that the untrained model is random at this point.</span> model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">randomNormal</span>([<span class="hljs-number">10</span>, <span class="hljs-number">1</span>])).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">layer</span> <span class="param-type">(<a href="#class:layers.Layer">tf.layers.Layer</a>)</span> <span class="param-docs">Layer instance.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:SymbolicTensor" href="#class:SymbolicTensor">tf.SymbolicTensor</a> <span class="signature"> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L98-L145" target=_blank>Source</a> </span> </div> <div class="documentation"><p><a href="#class:SymbolicTensor">tf.SymbolicTensor</a> is a placeholder for a Tensor without any concrete value.</p> <p>They are most often encountered when building a graph of <code>Layer</code>s for a <a href="#class:LayersModel">tf.LayersModel</a> and the input data's shape, but not values are known.</p> </div> <div class="method-list"> </div> </div> <div class="subheading"> <div class="title"> <a name="Models-Op Registry" href="#Models-Op Registry" class="symbol-link"> Models / Op Registry </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="deregisterOp" href="#deregisterOp"> tf.deregisterOp</a> <span class="signature">(name)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/operations/custom_op/register.ts#L78-L80" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Deregister the Op for graph model executor.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">The Tensorflow Op name.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="getRegisteredOp" href="#getRegisteredOp"> tf.getRegisteredOp</a> <span class="signature">(name)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/operations/custom_op/register.ts#L67-L69" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Retrieve the OpMapper object for the registered op.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> 
<ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">The Tensorflow Op name.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">OpMapper</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="registerOp" href="#registerOp"> tf.registerOp</a> <span class="signature">(name, opFunc)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-converter/src/operations/custom_op/register.ts#L48-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Register an Op for graph model executor. This allows you to register TensorFlow custom op or override existing op.</p> <p>Here is an example of registering a new MatMul Op.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> <span class="hljs-title function_">customMatmul</span> = (<span class="hljs-params">node</span>) =&gt; tf.<span class="hljs-title function_">matMul</span>( node.<span class="hljs-property">inputs</span>[<span class="hljs-number">0</span>], node.<span class="hljs-property">inputs</span>[<span class="hljs-number">1</span>], node.<span class="hljs-property">attrs</span>[<span class="hljs-string">&#x27;transpose_a&#x27;</span>], node.<span class="hljs-property">attrs</span>[<span class="hljs-string">&#x27;transpose_b&#x27;</span>]); tf.<span class="hljs-title function_">registerOp</span>(<span class="hljs-string">&#x27;MatMul&#x27;</span>, customMatmul); </code></pre> <p>The inputs and attrs of the node object are based on the TensorFlow op registry.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">The Tensorflow Op name.</span> </li> <li class="parameter"> <span class="param-name">opFunc</span> <span class="param-type">(Object)</span> <span class="param-docs">An op function which is called with the current graph node during execution and needs to return a tensor or a list of tensors. The node has the following attributes:</p> <ul> <li>attr: A map from attribute name to its value</li> <li>inputs: A list of input tensors</li> </ul> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Layers" href="#Layers" class="symbol-link">Layers</a> </div> <div class="description"> <p>Layers are the primary building block for constructing a Model. 
Each layer will typically perform some computation to transform its input to its output.</p> <p>Layers will automatically take care of creating and initializing the various internal variables/weights they need to function.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Advanced Activation" href="#Layers-Advanced Activation" class="symbol-link"> Layers / Advanced Activation </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.elu" href="#layers.elu"> tf.layers.elu</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L102-L104" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Exponential Linear Unit (ELU).</p> <p>It follows: <code>f(x) = alpha * (exp(x) - 1.) for x &lt; 0</code>, <code>f(x) = x for x &gt;= 0</code>.</p> <p>Input shape: Arbitrary. Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1511.07289v1">Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">Float <code>&gt;= 0</code>. Negative slope coefficient. Defaults to <code>1.0</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ELU</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.leakyReLU" href="#layers.leakyReLU"> tf.layers.leakyReLU</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L147-L149" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Leaky version of a rectified linear unit.</p> <p>It allows a small gradient when the unit is not active: <code>f(x) = alpha * x for x &lt; 0.</code> <code>f(x) = x for x &gt;= 0.</code></p> <p>Input shape: Arbitrary. Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">Float <code>&gt;= 0</code>. Negative slope coefficient. Defaults to <code>0.3</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">LeakyReLU</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.prelu" href="#layers.prelu"> tf.layers.prelu</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L172-L174" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Parameterized version of a leaky rectified linear unit.</p> <p>It follows <code>f(x) = alpha * x for x &lt; 0.</code> <code>f(x) = x for x &gt;= 0.</code> wherein <code>alpha</code> is a trainable weight.</p> <p>Input shape: Arbitrary. 
Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">alphaInitializer</span> <span class="param-type">(<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>|'constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string)</span> <span class="param-docs">Initializer for the learnable alpha.</span> </li> <li class="parameter config-param"> <span class="param-name">alphaRegularizer</span> <span class="param-type">(Regularizer)</span> <span class="param-docs">Regularizer for the learnable alpha.</span> </li> <li class="parameter config-param"> <span class="param-name">alphaConstraint</span> <span class="param-type">(<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the learnable alpha.</span> </li> <li class="parameter config-param"> <span class="param-name">sharedAxes</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The axes along which to share learnable parameters for the activation function. For example, if the incoming feature maps are from a 2D convolution with output shape <code>[numExamples, height, width, channels]</code>, and you wish to share parameters across space (height and width) so that each filter channels has only one set of parameters, set <code>shared_axes: [1, 2]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">PReLU</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.reLU" href="#layers.reLU"> tf.layers.reLU</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L123-L125" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Rectified Linear Unit activation function.</p> <p>Input shape: Arbitrary. Use the config field <code>inputShape</code> (Array of integers, does not include the sample axis) when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">maxValue</span> <span class="param-type">(number)</span> <span class="param-docs">Float, the maximum output value.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ReLU</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.softmax" href="#layers.softmax"> tf.layers.softmax</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L192-L194" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Softmax activation layer.</p> <p>Input shape: Arbitrary. Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">Integer, axis along which the softmax normalization is applied. Defaults to <code>-1</code> (i.e., the last axis).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Softmax</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.thresholdedReLU" href="#layers.thresholdedReLU"> tf.layers.thresholdedReLU</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L220-L222" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Thresholded Rectified Linear Unit.</p> <p>It follows: <code>f(x) = x for x &gt; theta</code>, <code>f(x) = 0 otherwise</code>.</p> <p>Input shape: Arbitrary. 
Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as the input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="http://arxiv.org/abs/1402.3337">Zero-Bias Autoencoders and the Benefits of Co-Adapting Features</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">theta</span> <span class="param-type">(number)</span> <span class="param-docs">Float &gt;= 0. Threshold location of activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ThresholdedReLU</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Basic" href="#Layers-Basic" class="symbol-link"> Layers / Basic </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.activation" href="#layers.activation"> tf.layers.activation</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L482-L484" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Applies an activation function to an output.</p> <p>This layer applies element-wise activation function. Other layers, notably <code>dense</code> can also apply activation functions. Use this isolated activation function to extract the values before and after the activation. For instance:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">5</span>]}); <span class="hljs-keyword">const</span> denseLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>}); <span class="hljs-keyword">const</span> activationLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">activation</span>({<span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;relu6&#x27;</span>}); <span class="hljs-comment">// Obtain the output symbolic tensors by applying the layers in order.</span> <span class="hljs-keyword">const</span> denseOutput = denseLayer.<span class="hljs-title function_">apply</span>(input); <span class="hljs-keyword">const</span> activationOutput = activationLayer.<span class="hljs-title function_">apply</span>(denseOutput); <span class="hljs-comment">// Create the model based on the inputs.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">model</span>({ <span class="hljs-attr">inputs</span>: input, <span class="hljs-attr">outputs</span>: [denseOutput, activationOutput] }); <span class="hljs-comment">// Collect both outputs and print separately.</span> <span class="hljs-keyword">const</span> [denseOut, activationOut] = model.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">randomNormal</span>([<span class="hljs-number">6</span>, <span class="hljs-number">5</span>])); denseOut.<span class="hljs-title function_">print</span>(); activationOut.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Name of the activation function to use.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> 
<span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Activation</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.dense" href="#layers.dense"> tf.layers.dense</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L519-L521" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a dense (fully connected) layer.</p> <p>This layer implements the operation: <code>output = activation(dot(input, kernel) + bias)</code></p> <p><code>activation</code> is the element-wise activation function passed as the <code>activation</code> argument.</p> <p><code>kernel</code> is a weights matrix created by the layer.</p> <p><code>bias</code> is a bias vector created by the layer (only applicable if <code>useBias</code> is <code>true</code>).</p> <p><em>Input shape:</em></p> <p>nD <a href="#class:Tensor">tf.Tensor</a> with shape: <code>(batchSize, ..., inputDim)</code>.</p> <p>The most common situation would be a 2D input with shape <code>(batchSize, inputDim)</code>.</p> <p><em>Output shape:</em></p> <p>nD tensor with shape: <code>(batchSize, ..., units)</code>.</p> <p>For instance, for a 2D input with shape <code>(batchSize, inputDim)</code>, the output would have shape <code>(batchSize, units)</code>.</p> <p>Note: if the input to the layer has a rank greater than 2, then it is flattened prior to the initial dot product with the kernel.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use.</p> <p>If unspecified, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to apply a bias.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the dense kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span
class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">If specified, defines inputShape as <code>[inputDim]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the dense kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Dense</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.dropout" href="#layers.dropout"> tf.layers.dropout</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L533-L535" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Applies <a target="_blank" rel="noopener" href="http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf">dropout</a> to the input.</p> <p>Dropout consists of randomly setting a fraction <code>rate</code> of the input units to 0 at each update during training time, which helps prevent overfitting.</p>
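<p>A minimal sketch of typical usage (the layer sizes and rate below are illustrative): dropout only takes effect while the model is training, e.g. inside <code>fit()</code>, and passes values through unchanged during inference:</p> <pre class="hljs"><code class="hljs language-js">const model = tf.sequential();
model.add(tf.layers.dense({units: 8, activation: 'relu', inputShape: [4]}));
// Randomly zero out 25% of the previous layer's activations at each training
// update; the kept activations are scaled up to compensate.
model.add(tf.layers.dropout({rate: 0.25}));
model.add(tf.layers.dense({units: 1}));
// During predict() the dropout layer is a pass-through.
model.predict(tf.randomNormal([2, 4])).print();
</code></pre>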
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">Float between 0 and 1. Fraction of the input units to drop.</span> </li> <li class="parameter config-param"> <span class="param-name">noiseShape</span> <span class="param-type">(number[])</span> <span class="param-docs">Integer array representing the shape of the binary dropout mask that will be multiplied with the input.</p> <p>For instance, if your inputs have shape <code>(batchSize, timesteps, features)</code> and you want the dropout mask to be the same for all timesteps, you can use <code>noiseShape: [batchSize, 1, features]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">An integer to use as random seed.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Dropout</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.embedding" href="#layers.embedding"> tf.layers.embedding</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L684-L686" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Maps positive integers (indices) into dense vectors of fixed size. E.g. [[4], [20]] -&gt; [[0.25, 0.1], [0.6, -0.2]]</p> <p><em>Input shape:</em> 2D tensor with shape: <code>[batchSize, sequenceLength]</code>.</p> <p><em>Output shape:</em> 3D tensor with shape: <code>[batchSize, sequenceLength, outputDim]</code>.</p>
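<p>A minimal sketch of typical usage (the vocabulary size, embedding dimension, and indices below are illustrative):</p> <pre class="hljs"><code class="hljs language-js">const model = tf.sequential();
// inputDim is the vocabulary size; outputDim is the embedding dimension.
model.add(tf.layers.embedding({inputDim: 10, outputDim: 3, inputShape: [4]}));
// 2D input of indices with shape [batchSize, sequenceLength] = [2, 4].
const indices = tf.tensor2d([[0, 1, 2, 3], [4, 5, 6, 7]], [2, 4], 'int32');
// 3D output with shape [batchSize, sequenceLength, outputDim] = [2, 4, 3].
model.predict(indices).print();
</code></pre>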
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Integer &gt; 0. Size of the vocabulary, i.e. maximum integer index + 1.</span> </li> <li class="parameter config-param"> <span class="param-name">outputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Integer &gt;= 0. Dimension of the dense embedding.</span> </li> <li class="parameter config-param"> <span class="param-name">embeddingsInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>embeddings</code> matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">embeddingsRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>embeddings</code> matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">embeddingsConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>embeddings</code> matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">maskZero</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the input value 0 is a special &quot;padding&quot; value that should be masked out. This is useful when using recurrent layers which may take variable-length input.</p> <p>If this is <code>true</code>, then all subsequent layers in the model need to support masking or an exception will be raised. If <code>maskZero</code> is set to <code>true</code>, index 0 cannot be used in the vocabulary as a consequence (<code>inputDim</code> should equal the size of the vocabulary + 1).</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(number|number[])</span> <span class="param-docs">Length of input sequences, when it is constant.</p> <p>This argument is required if you are going to connect <code>flatten</code> then <code>dense</code> layers downstream (without it, the shape of the dense outputs cannot be computed).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used.
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Embedding</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.flatten" href="#layers.flatten"> tf.layers.flatten</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L592-L594" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Flattens the input. Does not affect the batch size.</p> <p>A <code>Flatten</code> layer flattens each batch in its inputs to 1D (making the output 2D).</p> <p>For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">4</span>, <span class="hljs-number">3</span>]}); <span class="hljs-keyword">const</span> flattenLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">flatten</span>(); <span class="hljs-comment">// Inspect the inferred output shape of the flatten layer, which</span> <span class="hljs-comment">// equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the</span> <span class="hljs-comment">// flattening. 
(The 1st dimension is the undetermined batch size.)</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(flattenLayer.<span class="hljs-title function_">apply</span>(input).<span class="hljs-property">shape</span>)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Image data format: channelsLast (default) or channelsFirst.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support.
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Flatten</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.permute" href="#layers.permute"> tf.layers.permute</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L669-L671" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Permutes the dimensions of the input according to a given pattern.</p> <p>Useful for, e.g., connecting RNNs and convnets together.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">permute</span>({ <span class="hljs-attr">dims</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">1</span>], <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">64</span>] })); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(model.<span class="hljs-property">outputShape</span>); <span class="hljs-comment">// Now model&#x27;s output shape is [null, 64, 10], where null is the</span> <span class="hljs-comment">// unpermuted sample (batch) dimension.</span> </code></pre> <p>Input shape: Arbitrary. Use the configuration field <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same rank as the input shape, but with the dimensions re-ordered (i.e., permuted) according to the <code>dims</code> configuration of this layer.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">dims</span> <span class="param-type">(number[])</span> <span class="param-docs">Array of integers. Permutation pattern. Does not include the sample (batch) dimension. Index starts at 1. For instance, <code>[2, 1]</code> permutes the first and second dimensions of the input.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Permute</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.repeatVector" href="#layers.repeatVector"> tf.layers.repeatVector</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L610-L612" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Repeats the input n times in a new dimension.</p> <pre class="hljs"><code class="hljs language-js"> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">repeatVector</span>({<span class="hljs-attr">n</span>: <span class="hljs-number">4</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">2</span>]})); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]]); <span class="hljs-comment">// Use the model to do inference on a data point the model hasn&#x27;t seen</span> model.<span class="hljs-title function_">predict</span>(x).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// output shape is now [batch, 4, 2]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">n</span> <span
class="param-type">(number)</span> <span class="param-docs">The integer number of times to repeat the input.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">RepeatVector</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.reshape" href="#layers.reshape"> tf.layers.reshape</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L637-L639" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Reshapes an input to a certain shape.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">4</span>, <span class="hljs-number">3</span>]}); <span class="hljs-keyword">const</span> reshapeLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">reshape</span>({<span class="hljs-attr">targetShape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">6</span>]}); <span class="hljs-comment">// Inspect the inferred output shape of the Reshape layer, which</span> <span class="hljs-comment">// equals `[null, 2, 6]`. (The 1st dimension is the undetermined batch size.)</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(reshapeLayer.<span class="hljs-title function_">apply</span>(input).<span class="hljs-property">shape</span>)); </code></pre> <p>Input shape: Arbitrary, although all dimensions in the input shape must be fixed. Use the configuration <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: [batchSize, targetShape[0], targetShape[1], ..., targetShape[targetShape.length - 1]].</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">targetShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">The target shape. Does not include the batch axis.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used.
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Reshape</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.spatialDropout1d" href="#layers.spatialDropout1d"> tf.layers.spatialDropout1d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L569-L571" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Spatial 1D version of Dropout.</p> <p>This layer type performs the same function as the Dropout layer, but it drops entire 1D feature maps instead of individual elements. For example, if an input example consists of 3 timesteps and the feature map for each timestep has a size of 4, a <code>spatialDropout1d</code> layer may zero out the feature maps of the 1st and 2nd timesteps completely while sparing all feature elements of the 3rd timestep.</p> <p>If adjacent frames (timesteps) are strongly correlated (as is normally the case in early convolution layers), regular dropout will not regularize the activations and will merely result in an effective learning rate decrease. In this case, <code>spatialDropout1d</code> will help promote independence among feature maps and should be used instead.</p> <p><em>Arguments:</em> rate: A floating-point number &gt;=0 and &lt;=1. Fraction of the input elements to drop.</p> <p><em>Input shape:</em> 3D tensor with shape <code>(samples, timesteps, channels)</code>.</p> <p><em>Output shape:</em> Same as the input shape.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1411.4280">Efficient Object Localization Using Convolutional Networks</a></li> </ul>
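<p>A minimal sketch (the values below are illustrative): because whole feature maps are dropped, every timestep of a dropped channel is zeroed together, and the dropout is only applied when the layer is called in training mode:</p> <pre class="hljs"><code class="hljs language-js">const spatialDropout = tf.layers.spatialDropout1d({rate: 0.5});
// 3D input with shape [samples, timesteps, channels] = [1, 3, 4].
const x = tf.ones([1, 3, 4]);
// In training mode, entire channels are zeroed across all timesteps
// (and the surviving channels are scaled up to compensate).
spatialDropout.apply(x, {training: true}).print();
// Outside of training mode the layer passes the input through unchanged.
spatialDropout.apply(x).print();
</code></pre>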
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">Float between 0 and 1. Fraction of the input units to drop.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">An integer to use as random seed.</span> </li> <li class="parameter config-param"> <span class="param-name">input_shape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">batch_input_shape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">batch_size</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">input_dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SpatialDropout1D</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Convolutional" href="#Layers-Convolutional" class="symbol-link"> Layers / Convolutional </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.conv1d" href="#layers.conv1d"> tf.layers.conv1d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L246-L248" target=_blank>Source</a> </span> </div> <div class="documentation"><p>1D convolution layer (e.g., temporal convolution).</p> <p>This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs.</p> <p>If <code>useBias</code> is <code>true</code>, a bias vector is created and added to the outputs.</p> <p>If <code>activation</code> is not <code>null</code>, it is applied to the outputs as well.</p> <p>When using this layer as the first layer in a model, provide an <code>inputShape</code> argument (an <code>Array</code> of numbers or <code>null</code> entries).</p> <p>For example, <code>inputShape</code> would be:</p> <ul> <li><code>[10, 128]</code> for sequences of 10 128-dimensional vectors</li> <li><code>[null, 128]</code> for variable-length sequences of 128-dimensional vectors.</li> </ul>
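<p>A minimal sketch of typical usage (the filter count, kernel size, and input shape below are illustrative):</p> <pre class="hljs"><code class="hljs language-js">const model = tf.sequential();
// 16 filters, each sliding a window of 3 timesteps over the input.
model.add(tf.layers.conv1d({
  filters: 16,
  kernelSize: 3,
  activation: 'relu',
  inputShape: [10, 8]
}));
// With the default 'valid' padding, 10 timesteps shrink to 10 - 3 + 1 = 8,
// so the inferred output shape is [null, 8, 16].
console.log(JSON.stringify(model.outputShape));
model.predict(tf.randomNormal([2, 10, 8])).print();
</code></pre>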
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Conv1D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.conv2d" href="#layers.conv2d"> tf.layers.conv2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L268-L270" target=_blank>Source</a> </span> </div> <div class="documentation"><p>2D convolution layer (e.g. spatial convolution over images).</p> <p>This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.</p> <p>If <code>useBias</code> is True, a bias vector is created and added to the outputs.</p> <p>If <code>activation</code> is not <code>null</code>, it is applied to the outputs as well.</p> <p>When using this layer as the first layer in a model, provide the keyword argument <code>inputShape</code> (Array of integers, does not include the sample axis), e.g. <code>inputShape=[128, 128, 3]</code> for 128x128 RGB pictures in <code>dataFormat='channelsLast'</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. 
If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector. 
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Conv2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.conv2dTranspose" href="#layers.conv2dTranspose"> tf.layers.conv2dTranspose</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L307-L309" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Transposed convolutional layer (sometimes called Deconvolution).</p> <p>The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution.</p> <p>When using this layer as the first layer in a model, provide the configuration <code>inputShape</code> (<code>Array</code> of integers, does not include the sample axis), e.g., <code>inputShape: [128, 128, 3]</code> for 128x128 RGB pictures in <code>dataFormat: 'channelsLast'</code>.</p> <p>Input shape: 4D tensor with shape: <code>[batch, channels, rows, cols]</code> if <code>dataFormat</code> is <code>'channelsFirst'</code>. or 4D tensor with shape <code>[batch, rows, cols, channels]</code> if <code>dataFormat</code> is <code>'channelsLast'</code>.</p> <p>Output shape: 4D tensor with shape: <code>[batch, filters, newRows, newCols]</code> if <code>dataFormat</code> is <code>'channelsFirst'</code>. 
or 4D tensor with shape: <code>[batch, newRows, newCols, filters]</code> if <code>dataFormat</code> is <code>'channelsLast'</code>.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1603.07285v1">A guide to convolution arithmetic for deep learning</a></li> <li><a target="_blank" rel="noopener" href="http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf">Deconvolutional Networks</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector. 
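<p>As an illustrative sketch (argument values are arbitrary), <code>conv2dTranspose</code> is typically used to upsample a feature map, going in the opposite shape direction of a strided convolution:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch: upsample an 8x8x32 feature map by a factor of 2.
const features = tf.input({shape: [8, 8, 32]});
const upsampled = tf.layers.conv2dTranspose({
  filters: 16,
  kernelSize: 2,
  strides: 2,
  padding: 'same'
}).apply(features);
console.log(JSON.stringify(upsampled.shape));
// [null,16,16,16]: rows and cols are doubled by the stride-2 transposed kernel.
</code></pre>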
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Conv2DTranspose</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.conv3d" href="#layers.conv3d"> tf.layers.conv3d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L329-L331" target=_blank>Source</a> </span> </div> <div class="documentation"><p>3D convolution layer (e.g. spatial convolution over volumes).</p> <p>This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.</p> <p>If <code>useBias</code> is True, a bias vector is created and added to the outputs.</p> <p>If <code>activation</code> is not <code>null</code>, it is applied to the outputs as well.</p> <p>When using this layer as the first layer in a model, provide the keyword argument <code>inputShape</code> (Array of integers, does not include the sample axis), e.g. <code>inputShape=[128, 128, 128, 1]</code> for 128x128x128 grayscale volumes in <code>dataFormat='channelsLast'</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. 
the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector. 
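<p>For example (illustrative values only), a <code>conv3d</code> layer applied to a batch of single-channel volumes:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch: 4 filters of size 3x3x3 over 16x16x16 volumes.
const volumes = tf.input({shape: [16, 16, 16, 1]});
const features = tf.layers.conv3d({
  filters: 4,
  kernelSize: 3,
  padding: 'same',
  activation: 'relu'
}).apply(volumes);
console.log(JSON.stringify(features.shape));
// [null,16,16,16,4]
</code></pre>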
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Conv3D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.cropping2D" href="#layers.cropping2D"> tf.layers.cropping2D</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L401-L403" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Cropping layer for 2D input (e.g., image).</p> <p>This layer can crop an input at the top, bottom, left and right side of an image tensor.</p> <p>Input shape: 4D tensor with shape:</p> <ul> <li>If <code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, rows, cols, channels]</code></li> <li>If <code>data_format</code> is <code>&quot;channels_first&quot;</code>: <code>[batch, channels, rows, cols]</code>.</li> </ul> <p>Output shape: 4D with shape:</p> <ul> <li>If <code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, croppedRows, croppedCols, channels]</code> - If <code>dataFormat</code> is <code>&quot;channelsFirst&quot;</code>: <code>[batch, channels, croppedRows, croppedCols]</code>.</li> </ul> <p>Examples</p> <pre class="hljs"><code class="hljs language-js"> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">cropping2D</span>({<span class="hljs-attr">cropping</span>:[[<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]], <span class="hljs-attr">inputShape</span>: 
[<span class="hljs-number">128</span>, <span class="hljs-number">128</span>, <span class="hljs-number">3</span>]})); <span class="hljs-comment">//now output shape is [batch, 124, 124, 3]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">cropping</span> <span class="param-type">(number|[number, number]|[[number, number], [number, number]])</span> <span class="param-docs">Dimension of the cropping along the width and the height.</p> <ul> <li>If integer: the same symmetric cropping is applied to width and height.</li> <li>If list of 2 integers: interpreted as two different symmetric cropping values for height and width: <code>[symmetric_height_crop, symmetric_width_crop]</code>.</li> <li>If a list of 2 lists of 2 integers: interpreted as <code>[[top_crop, bottom_crop], [left_crop, right_crop]]</code></li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code></p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Cropping2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.depthwiseConv2d" href="#layers.depthwiseConv2d"> tf.layers.depthwiseConv2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L445-L447" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Depthwise separable 2D convolution.</p> <p>This layer performs only the first step of a depthwise separable convolution: the depthwise spatial convolution, which acts on each input channel separately. The <code>depthMultiplier</code> argument controls how many output channels are generated per input channel in the depthwise step.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">An integer or Array of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.</span> </li> <li class="parameter config-param"> <span class="param-name">depthMultiplier</span> <span class="param-type">(number)</span> <span class="param-docs">The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to <code>filtersIn * depthMultiplier</code>. Default: 1.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the depthwise kernel matrix.
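<p>For example (illustrative values only), <code>depthwiseConv2d</code> filters each input channel separately and expands the channel count by <code>depthMultiplier</code>:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch: 3 input channels * depthMultiplier 2 = 6 output channels.
const image = tf.input({shape: [64, 64, 3]});
const filtered = tf.layers.depthwiseConv2d({
  kernelSize: 3,
  depthMultiplier: 2,
  padding: 'same'
}).apply(image);
console.log(JSON.stringify(filtered.shape));
// [null,64,64,6]
</code></pre>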
Default: GlorotNormal.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the depthwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function for the depthwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector. 
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">DepthwiseConv2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.separableConv2d" href="#layers.separableConv2d"> tf.layers.separableConv2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L366-L368" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Depthwise separable 2D convolution.</p> <p>Separable convolution consists of first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes together the resulting output channels. The <code>depthMultiplier</code> argument controls how many output channels are generated per input channel in the depthwise step.</p> <p>Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block.</p> <p>Input shape: 4D tensor with shape: <code>[batch, channels, rows, cols]</code> if data_format='channelsFirst' or 4D tensor with shape: <code>[batch, rows, cols, channels]</code> if data_format='channelsLast'.</p> <p>Output shape: 4D tensor with shape: <code>[batch, filters, newRows, newCols]</code> if data_format='channelsFirst' or 4D tensor with shape: <code>[batch, newRows, newCols, filters]</code> if data_format='channelsLast'. 
<code>rows</code> and <code>cols</code> values might have changed due to padding.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">depthMultiplier</span> <span class="param-type">(number)</span> <span class="param-docs">The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to <code>filtersIn * depthMultiplier</code>. Default: 1.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the depthwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">pointwiseInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the pointwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the depthwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">pointwiseRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the pointwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">depthwiseConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the depthwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">pointwiseConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the pointwise kernel matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. 
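<p>For example (illustrative values only), <code>separableConv2d</code> yields the same output shape as a regular <code>conv2d</code> with matching <code>filters</code>, <code>kernelSize</code> and <code>padding</code>, typically with far fewer parameters:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch: depthwise step followed by a 1x1 pointwise step.
const image = tf.input({shape: [64, 64, 3]});
const features = tf.layers.separableConv2d({
  filters: 32,
  kernelSize: 3,
  padding: 'same'
}).apply(image);
console.log(JSON.stringify(features.shape));
// [null,64,64,32]
</code></pre>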
If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number]|[number, number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function of the layer.</p> <p>If you don't specify the activation, none is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector. 
Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the convolutional kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the convolutional kernel weights.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">activityRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the activation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SeparableConv2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.upSampling2d" href="#layers.upSampling2d"> tf.layers.upSampling2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L429-L431" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Upsampling layer for 2D inputs.</p> <p>Repeats the rows and columns of the data by size[0] and size[1] respectively.</p> <p>Input shape: 4D tensor with shape: - If <code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, rows, cols, channels]</code> - If <code>dataFormat</code> is <code>&quot;channelsFirst&quot;</code>: <code>[batch, channels, rows, cols]</code></p> <p>Output shape: 4D tensor with shape: - If <code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, upsampledRows, upsampledCols, channels]</code> - If <code>dataFormat</code> is <code>&quot;channelsFirst&quot;</code>: <code>[batch, channels, upsampledRows, upsampledCols]</code></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">size</span> <span class="param-type">(number[])</span> <span class="param-docs">The upsampling factors for rows and columns.</p> <p>Defaults to <code>[2, 2]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span 
class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>&quot;channelsLast&quot;</code> corresponds to inputs with shape <code>[batch, ..., channels]</code></p> <p><code>&quot;channelsFirst&quot;</code> corresponds to inputs with shape <code>[batch, channels, ...]</code>.</p> <p>Defaults to <code>&quot;channelsLast&quot;</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">interpolation</span> <span class="param-type">(InterpolationFormat)</span> <span class="param-docs">The interpolation mechanism, one of <code>&quot;nearest&quot;</code> or <code>&quot;bilinear&quot;</code>, default to <code>&quot;nearest&quot;</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">UpSampling2D</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Merge" href="#Layers-Merge" class="symbol-link"> Layers / Merge </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.add" href="#layers.add"> tf.layers.add</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L710-L712" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that performs element-wise addition on an <code>Array</code> of inputs.</p> <p>It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). The inputs are specified as an <code>Array</code> when the <code>apply</code> method of the <code>Add</code> layer instance is called. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> addLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">add</span>(); <span class="hljs-keyword">const</span> sum = addLayer.<span class="hljs-title function_">apply</span>([input1, input2]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(sum.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// You get [null, 2, 2], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Add</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.average" href="#layers.average"> tf.layers.average</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L732-L734" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that performs element-wise averaging on an <code>Array</code> of inputs.</p> <p>It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> averageLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">average</span>(); <span class="hljs-keyword">const</span> average = averageLayer.<span class="hljs-title function_">apply</span>([input1, input2]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(average.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// You get [null, 2, 2], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. 
Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Average</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.concatenate" href="#layers.concatenate"> tf.layers.concatenate</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L756-L758" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that concatenates an <code>Array</code> of inputs.</p> <p>It takes a list of tensors, all of the same shape except for the concatenation axis, and returns a single tensor, the concatenation of all inputs. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]}); <span class="hljs-keyword">const</span> concatLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">concatenate</span>(); <span class="hljs-keyword">const</span> output = concatLayer.<span class="hljs-title function_">apply</span>([input1, input2]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// You get [null, 2, 5], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension. The last dimension (5) is the result of concatenating the</span> <span class="hljs-comment">// last dimensions of the inputs (2 and 3).</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">Axis along which to concatenate.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Concatenate</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.dot" href="#layers.dot"> tf.layers.dot</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L849-L851" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that computes a dot product between samples in two tensors.</p> <p>E.g., if applied to a list of two tensors <code>a</code> and <code>b</code> both of shape <code>[batchSize, n]</code>, the output will be a tensor of shape <code>[batchSize, 1]</code>, where each entry at index <code>[i, 0]</code> will be the dot product between <code>a[i, :]</code> and <code>b[i, :]</code>.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> dotLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dot</span>({<span class="hljs-attr">axes</span>: -<span class="hljs-number">1</span>}); <span class="hljs-keyword">const</span> x1 = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">10</span>, <span class="hljs-number">20</span>], [<span class="hljs-number">30</span>, <span class="hljs-number">40</span>]]); <span class="hljs-keyword">const</span> x2 = tf.<span class="hljs-title function_">tensor2d</span>([[-<span class="hljs-number">1</span>, -<span class="hljs-number">2</span>], [-<span class="hljs-number">3</span>, -<span class="hljs-number">4</span>]]); <span class="hljs-comment">// Invoke the layer&#x27;s apply() method in eager (imperative) mode.</span> <span class="hljs-keyword">const</span> y = dotLayer.<span class="hljs-title function_">apply</span>([x1, x2]); y.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">axes</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">Axis or axes along which the dot product will be taken.</p> <p>Integer or an Array of integers.</span> </li> <li class="parameter config-param"> <span class="param-name">normalize</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to L2-normalize samples along the dot product axis before taking the dot product.</p> <p>If set to <code>true</code>, the output of the dot product is the cosine proximity between the two samples.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Dot</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.maximum" href="#layers.maximum"> tf.layers.maximum</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L778-L780" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that computes the element-wise maximum of an <code>Array</code> of inputs.</p> <p>It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> maxLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">maximum</span>(); <span class="hljs-keyword">const</span> max = maxLayer.<span class="hljs-title function_">apply</span>([input1, input2]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(max.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// You get [null, 2, 2], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. 
Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Maximum</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.minimum" href="#layers.minimum"> tf.layers.minimum</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L800-L802" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that computes the element-wise minimum of an <code>Array</code> of inputs.</p> <p>It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> minLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">minimum</span>(); <span class="hljs-keyword">const</span> min = minLayer.<span class="hljs-title function_">apply</span>([input1, input2]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(min.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// You get [null, 2, 2], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Minimum</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.multiply" href="#layers.multiply"> tf.layers.multiply</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L823-L825" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer that multiplies (element-wise) an <code>Array</code> of inputs.</p> <p>It takes as input an Array of tensors, all of the same shape, and returns a single tensor (also of the same shape). 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input1 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input2 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> input3 = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> multiplyLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">multiply</span>(); <span class="hljs-keyword">const</span> product = multiplyLayer.<span class="hljs-title function_">apply</span>([input1, input2, input3]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(product.<span class="hljs-property">shape</span>); <span class="hljs-comment">// You get [null, 2, 2], with the first dimension as the undetermined batch</span> <span class="hljs-comment">// dimension.</span></code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Multiply</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Normalization" href="#Layers-Normalization" class="symbol-link"> Layers / Normalization </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.batchNormalization" href="#layers.batchNormalization"> tf.layers.batchNormalization</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L876-L878" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Batch normalization layer (Ioffe and Szegedy, 2014).</p> <p>Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.</p> <p>Input shape: Arbitrary. Use the keyword argument <code>inputShape</code> (Array of integers, does not include the sample axis) when calling the constructor of this class, if this layer is used as a first layer in a model.</p> <p>Output shape: Same shape as input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1502.03167">Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The integer axis that should be normalized (typically the features axis). Defaults to -1.</p> <p>For instance, after a <code>Conv2D</code> layer with <code>data_format=&quot;channels_first&quot;</code>, set <code>axis=1</code> in <code>batchNormalization</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">momentum</span> <span class="param-type">(number)</span> <span class="param-docs">Momentum of the moving average. 
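<p>For example, a minimal sketch of applying the layer eagerly to a concrete tensor (the variable names, shape and values below are arbitrary, chosen only for illustration):</p> <pre class="hljs"><code class="hljs language-js">// A freshly constructed layer has gamma = 1, beta = 0 and the moving
// statistics at their initial values, so at inference time the output
// closely tracks the input.
const bnLayer = tf.layers.batchNormalization({axis: -1});
const x = tf.tensor2d([[1, 2], [3, 4], [5, 6]]);
const y = bnLayer.apply(x);
console.log(JSON.stringify(y.shape));  // [3, 2]: same shape as the input.
</code></pre>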
Defaults to 0.99.</span> </li> <li class="parameter config-param"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">Small float added to the variance to avoid dividing by zero. Defaults to 1e-3.</span> </li> <li class="parameter config-param"> <span class="param-name">center</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, add offset of <code>beta</code> to normalized tensor. If <code>false</code>, <code>beta</code> is ignored. Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">scale</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, multiply by <code>gamma</code>. If <code>false</code>, <code>gamma</code> is not used. When the next layer is linear (also e.g. <code>nn.relu</code>), this can be disabled since the scaling will be done by the next layer. Defaults to <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">betaInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the beta weight. Defaults to 'zeros'.</span> </li> <li class="parameter config-param"> <span class="param-name">gammaInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the gamma weight. Defaults to <code>ones</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">movingMeanInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the moving mean. Defaults to <code>zeros</code></span> </li> <li class="parameter config-param"> <span class="param-name">movingVarianceInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the moving variance. 
Defaults to 'Ones'.</span> </li> <li class="parameter config-param"> <span class="param-name">betaConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for the beta weight.</span> </li> <li class="parameter config-param"> <span class="param-name">gammaConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint for gamma weight.</span> </li> <li class="parameter config-param"> <span class="param-name">betaRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer for the beta weight.</span> </li> <li class="parameter config-param"> <span class="param-name">gammaRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer for the gamma weight.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">BatchNormalization</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.layerNormalization" href="#layers.layerNormalization"> tf.layers.layerNormalization</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L900-L902" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Layer-normalization layer (Ba et al., 2016).</p> <p>Normalizes the activations of the previous layer for each given example in a batch independently, instead of across a batch like in <code>batchNormalization</code>. In other words, this layer applies a transformation that maintains the mean activation within each example close to 0 and activation variance close to 1.</p> <p>Input shape: Arbitrary. Use the argument <code>inputShape</code> when using this layer as the first layer in a model.</p> <p>Output shape: Same as input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1607.06450">Layer Normalization</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The axis or axes that should be normalized (typically, the feature axis). Defaults to -1 (the last axis).</span> </li> <li class="parameter config-param"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A small positive float added to variance to avoid division by zero. Defaults to 1e-3.</span> </li> <li class="parameter config-param"> <span class="param-name">center</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, add offset of <code>beta</code> to normalized tensor. If <code>false</code>, <code>beta</code> is ignored. Default: <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">scale</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, multiply output by <code>gamma</code>. If <code>false</code>, <code>gamma</code> is not used. When the next layer is linear, this can be disabled since scaling will be done by the next layer. Default: <code>true</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">betaInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the beta weight. 
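<p>For example, a minimal sketch of applying the layer eagerly (the shape and values below are arbitrary, chosen only for illustration):</p> <pre class="hljs"><code class="hljs language-js">const lnLayer = tf.layers.layerNormalization({axis: -1});
const x = tf.tensor2d([[1, 2, 3], [10, 20, 30]]);
// Each row (example) is normalized independently to roughly zero mean and
// unit variance; the output shape equals the input shape.
const y = lnLayer.apply(x);
y.print();
</code></pre>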
Default: <code>'zeros'</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">gammaInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the gamma weight. Default: <code>'ones'</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">betaRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer for the beta weight.</span> </li> <li class="parameter config-param"> <span class="param-name">gammaRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer for the gamma weight.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">LayerNormalization</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Pooling" href="#Layers-Pooling" class="symbol-link"> Layers / Pooling </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.averagePooling1d" href="#layers.averagePooling1d"> tf.layers.averagePooling1d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L945-L947" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Average pooling operation for spatial data.</p> <p>Input shape: <code>[batchSize, inLength, channels]</code></p> <p>Output shape: <code>[batchSize, pooledLength, channels]</code></p> <p><code>tf.avgPool1d</code> is an alias.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number])</span> <span class="param-docs">Size of the window to pool over, should be an integer.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number])</span> <span class="param-docs">Period at which to sample the pooled values.</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">How to fill in data that's not an integer multiple of poolSize.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
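<p>For example, a minimal sketch (the shapes below are arbitrary, chosen only for illustration):</p> <pre class="hljs"><code class="hljs language-js">const poolLayer = tf.layers.averagePooling1d({poolSize: 2});
// Input: [batchSize, inLength, channels] = [1, 8, 3].
const x = tf.ones([1, 8, 3]);
const y = poolLayer.apply(x);
console.log(JSON.stringify(y.shape));  // [1, 4, 3]: pooledLength is 4.
</code></pre>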
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AveragePooling1D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.averagePooling2d" href="#layers.averagePooling2d"> tf.layers.averagePooling2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L980-L982" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Average pooling operation for spatial data.</p> <p>Input shape:</p> <ul> <li>If <code>dataFormat === CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, rows, cols, channels]</code></li> <li>If <code>dataFormat === CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, rows, cols]</code></li> </ul> <p>Output shape</p> <ul> <li>If <code>dataFormat === CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, pooledRows, pooledCols, channels]</code></li> <li>If <code>dataFormat === CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, pooledRows, pooledCols]</code></li> </ul> <p><code>tf.avgPool2d</code> is an alias.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">Factors by which to downscale in each dimension [vertical, horizontal]. Expects an integer or an array of 2 integers.</p> <p>For example, <code>[2, 2]</code> will halve the input in both spatial dimensions. If only one integer is specified, the same window length will be used for both dimensions.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">The size of the stride in each dimension of the pooling window. Expects an integer or an array of 2 integers. 
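<p>For example, a minimal sketch using the default <code>channelsLast</code> data format (the shapes below are arbitrary, chosen only for illustration):</p> <pre class="hljs"><code class="hljs language-js">const poolLayer = tf.layers.averagePooling2d({poolSize: [2, 2]});
// Input: [batchSize, rows, cols, channels] = [1, 4, 4, 3].
const x = tf.ones([1, 4, 4, 3]);
const y = poolLayer.apply(x);
console.log(JSON.stringify(y.shape));  // [1, 2, 2, 3]
</code></pre>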
</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">The padding type to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">The data format to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AveragePooling2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.averagePooling3d" href="#layers.averagePooling3d"> tf.layers.averagePooling3d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1013-L1015" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Average pooling operation for 3D data.</p> <p>Input shape</p> <ul> <li>If <code>dataFormat === channelsLast</code>: 5D tensor with shape: <code>[batchSize, depths, rows, cols, channels]</code></li> <li>If <code>dataFormat === channelsFirst</code>: 5D tensor with shape: <code>[batchSize, channels, depths, rows, cols]</code></li> </ul> <p>Output shape</p> <ul> <li>If <code>dataFormat === channelsLast</code>: 5D tensor with shape: <code>[batchSize, pooledDepths, pooledRows, pooledCols, channels]</code></li> <li>If <code>dataFormat === channelsFirst</code>: 5D tensor with shape: <code>[batchSize, channels, pooledDepths, pooledRows, pooledCols]</code></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number, number, number])</span> <span class="param-docs">Factors by which to downscale in each dimension [depth, height, width]. Expects an integer or an array of 3 integers.</p> <p>For example, <code>[2, 2, 2]</code> will halve the input in all three dimensions. If only one integer is specified, the same window length will be used for all dimensions.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number, number, number])</span> <span class="param-docs">The size of the stride in each dimension of the pooling window. Expects an integer or an array of 3 integers.</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">The padding type to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">The data format to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. 
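<p>For example, a minimal sketch using the default <code>channelsLast</code> data format (the shapes below are arbitrary, chosen only for illustration):</p> <pre class="hljs"><code class="hljs language-js">const poolLayer = tf.layers.averagePooling3d({poolSize: [2, 2, 2]});
// Input: [batchSize, depths, rows, cols, channels] = [1, 4, 4, 4, 1].
const x = tf.ones([1, 4, 4, 4, 1]);
const y = poolLayer.apply(x);
console.log(JSON.stringify(y.shape));  // [1, 2, 2, 2, 1]
</code></pre>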
If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AveragePooling3D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.globalAveragePooling1d" href="#layers.globalAveragePooling1d"> tf.layers.globalAveragePooling1d</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1034-L1036" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Global average pooling operation for temporal data.</p> <p>Input Shape: 3D tensor with shape: <code>[batchSize, steps, features]</code>.</p> <p>Output Shape: 2D tensor with shape: <code>[batchSize, features]</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GlobalAveragePooling1D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.globalAveragePooling2d" href="#layers.globalAveragePooling2d"> tf.layers.globalAveragePooling2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1052-L1054" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Global average pooling operation for spatial data.</p> <p>Input shape:</p> <ul> <li>If <code>dataFormat</code> is <code>CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, rows, cols, channels]</code>.</li> <li>If <code>dataFormat</code> is <code>CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, rows, cols]</code>.</li> </ul> <p>Output shape: 2D tensor with shape: <code>[batchSize, channels]</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">One of <code>CHANNEL_LAST</code> (default) or <code>CHANNEL_FIRST</code>.</p> <p>The ordering of the dimensions in the inputs. <code>CHANNEL_LAST</code> corresponds to inputs with shape <code>[batch, height, width, channels]</code> while <code>CHANNEL_FIRST</code> corresponds to inputs with shape <code>[batch, channels, height, width]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GlobalAveragePooling2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.globalMaxPooling1d" href="#layers.globalMaxPooling1d"> tf.layers.globalMaxPooling1d</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1065-L1067" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Global max pooling operation for temporal data.</p> <p>Input Shape: 3D tensor with shape: <code>[batchSize, steps, features]</code>.</p> <p>Output Shape: 2D tensor with shape: <code>[batchSize, features]</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. 
Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GlobalMaxPooling1D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.globalMaxPooling2d" href="#layers.globalMaxPooling2d"> tf.layers.globalMaxPooling2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1083-L1085" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Global max pooling operation for spatial data.</p> <p>Input shape:</p> <ul> <li>If <code>dataFormat</code> is <code>CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, rows, cols, channels]</code>.</li> <li>If <code>dataFormat</code> is <code>CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, rows, cols]</code>.</li> </ul> <p>Output shape: 2D tensor with shape: <code>[batchSize, channels]</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">One of <code>CHANNEL_LAST</code> (default) or <code>CHANNEL_FIRST</code>.</p> <p>The ordering of the dimensions in the inputs. <code>CHANNEL_LAST</code> corresponds to inputs with shape <code>[batch, height, width, channels]</code> while <code>CHANNEL_FIRST</code> corresponds to inputs with shape <code>[batch, channels, height, width]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. 
If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GlobalMaxPooling2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.maxPooling1d" href="#layers.maxPooling1d"> tf.layers.maxPooling1d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1096-L1098" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Max pooling operation for temporal data.</p> <p>Input shape: <code>[batchSize, inLength, channels]</code></p> <p>Output shape: <code>[batchSize, pooledLength, channels]</code></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number])</span> <span class="param-docs">Size of the window to pool over, should be an integer.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number])</span> <span class="param-docs">Period at which to sample the pooled values.</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">How to fill in data that's not an integer multiple of poolSize.</span> </li> <li class="parameter config-param"> <span 
class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">MaxPooling1D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.maxPooling2d" href="#layers.maxPooling2d"> tf.layers.maxPooling2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1121-L1123" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Max pooling operation for spatial data.</p> <p>Input shape</p> <ul> <li>If <code>dataFormat === CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, rows, cols, channels]</code></li> <li>If <code>dataFormat === CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, rows, cols]</code></li> </ul> <p>Output shape</p> <ul> <li>If <code>dataFormat=CHANNEL_LAST</code>: 4D tensor with shape: <code>[batchSize, pooledRows, pooledCols, channels]</code></li> <li>If <code>dataFormat=CHANNEL_FIRST</code>: 4D tensor with shape: <code>[batchSize, channels, pooledRows, pooledCols]</code></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">Factors by which to downscale in each dimension [vertical, horizontal]. Expects an integer or an array of 2 integers.</p> <p>For example, <code>[2, 2]</code> will halve the input in both spatial dimensions. If only one integer is specified, the same window length will be used for both dimensions.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">The size of the stride in each dimension of the pooling window. Expects an integer or an array of 2 integers. Integer, tuple of 2 integers, or None.</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">The padding type to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">The data format to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. 
If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">MaxPooling2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.maxPooling3d" href="#layers.maxPooling3d"> tf.layers.maxPooling3d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1146-L1148" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Max pooling operation for 3D data.</p> <p>Input shape</p> <ul> <li>If <code>dataFormat === channelsLast</code>: 5D tensor with shape: <code>[batchSize, depths, rows, cols, channels]</code></li> <li>If <code>dataFormat === channelsFirst</code>: 5D tensor with shape: <code>[batchSize, channels, depths, rows, cols]</code></li> </ul> <p>Output shape</p> <ul> <li>If <code>dataFormat=channelsLast</code>: 5D tensor with shape: <code>[batchSize, pooledDepths, pooledRows, pooledCols, channels]</code></li> <li>If <code>dataFormat=channelsFirst</code>: 5D tensor with shape: <code>[batchSize, channels, pooledDepths, pooledRows, pooledCols]</code></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">poolSize</span> <span class="param-type">(number|[number, number, number])</span> <span class="param-docs">Factors by which to downscale in each dimension [depth, height, width]. 
Expects an integer or an array of 3 integers.</p> <p>For example, <code>[2, 2, 2]</code> will halve the input in three dimensions. If only one integer is specified, the same window length will be used for all dimensions.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|[number, number, number])</span> <span class="param-docs">The size of the stride in each dimension of the pooling window. Expects an integer or an array of 3 integers. Integer, tuple of 3 integers, or None.</p> <p>If <code>null</code>, defaults to <code>poolSize</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">The padding type to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">The data format to use for the pooling layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">MaxPooling3D</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Recurrent" href="#Layers-Recurrent" class="symbol-link"> Layers / Recurrent </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.convLstm2d" href="#layers.convLstm2d"> tf.layers.convLstm2d</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1408-L1410" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Convolutional LSTM layer - Xingjian Shi 2015.</p> <p>This is a <code>ConvRNN2D</code> layer consisting of one <code>ConvLSTM2DCell</code>. However, unlike the underlying <code>ConvLSTM2DCell</code>, the <code>apply</code> method of <code>ConvLSTM2D</code> operates on a sequence of inputs. The shape of the input (not including the first, batch dimension) needs to be 4-D, with the first dimension being time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> filters = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> kernelSize = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> batchSize = <span class="hljs-number">4</span>; <span class="hljs-keyword">const</span> sequenceLength = <span class="hljs-number">2</span>; <span class="hljs-keyword">const</span> size = <span class="hljs-number">5</span>; <span class="hljs-keyword">const</span> channels = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> inputShape = [batchSize, sequenceLength, size, size, channels]; <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">ones</span>(inputShape); <span class="hljs-keyword">const</span> layer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">convLstm2d</span>({filters, kernelSize}); <span class="hljs-keyword">const</span> output = layer.<span class="hljs-title function_">apply</span>(input); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Activation function to use.</p> <p>Defaults to hyperbolic tangent (<code>tanh</code>)</p> <p>If you pass <code>null</code>, no activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span 
class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">unitForgetBias</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>true</code>, add 1 to the bias of the forget gate at initialization. Setting it to <code>true</code> will also force <code>biasInitializer = 'zeros'</code>. This is recommended in <a target="_blank" rel="noopener" href="http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf">Jozefowicz et al.</a></span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. 
These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this config field.</span> </li> <li class="parameter config-param"> <span class="param-name">returnSequences</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether to return the last output in the output sequence, or the full sequence.</span> </li> <li class="parameter config-param"> <span class="param-name">returnState</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether to return the last state in addition to the output.</span> </li> <li class="parameter config-param"> <span class="param-name">goBackwards</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>true</code>, process the input sequence backwards and return the reversed sequence (default: <code>false</code>).</span> </li> <li class="parameter config-param"> <span class="param-name">stateful</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>true</code>, the last state for each sample at index i in a batch will be used as initial state of the sample of index i in the following batch (default: <code>false</code>).</p> <p>You can set RNN layers to be &quot;stateful&quot;, which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable &quot;statefulness&quot;:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be a tuple of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>LayersModel.fit()</code>.</li> </ul> <p>To reset the state of your model, call <code>resetStates()</code> on either the specific layer or on the entire model.</span> </li> <li class="parameter config-param"> <span class="param-name">unroll</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>true</code>, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences (default: <code>false</code>). Porting Note: tfjs-layers has an imperative backend. RNNs are executed with normal TypeScript control flow. Hence this property is inapplicable and ignored in tfjs-layers.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Dimensionality of the input (integer). 
This option (or alternatively, the option <code>inputShape</code>) is required when this layer is used as the first layer in a model.</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Length of the input sequences, to be specified when it is constant. This argument is required if you are going to connect <code>Flatten</code> then <code>Dense</code> layers upstream (without it, the shape of the dense outputs cannot be computed). Note that if the recurrent layer is not the first layer in your model, you would need to specify the input length at the level of the first layer (e.g., via the <code>inputShape</code> option).</span> </li> <li class="parameter config-param"> <span class="param-name">cell</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>|<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An RNN cell instance. An RNN cell is a class that has:</p> <ul> <li>a <code>call()</code> method, which takes <code>[Tensor, Tensor]</code> as the first input argument. The first item is the input at time t, and the second item is the cell state at time t. The <code>call()</code> method returns <code>[outputAtT, statesAtTPlus1]</code>. The <code>call()</code> method of the cell can also take the argument <code>constants</code>, see section &quot;Note on passing external constants&quot; below. Porting Note: PyKeras overrides the <code>call()</code> signature of RNN cells, which are Layer subtypes, to accept two arguments. tfjs-layers does not do such overriding. Instead we preserve the <code>call()</code> signature, which due to its <code>Tensor|Tensor[]</code> argument and return value is flexible enough to handle the inputs and states.</li> <li>a <code>stateSize</code> attribute. This can be a single integer (single state) in which case it is the size of the recurrent state (which should be the same as the size of the cell output). This can also be an Array of integers (one size per state). In this case, the first entry (<code>stateSize[0]</code>) should be the same as the size of the cell output. It is also possible for <code>cell</code> to be a list of RNN cell instances, in which case the cells get stacked one after the other in the RNN, implementing an efficient stacked RNN.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. 
If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ConvLSTM2D</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.convLstm2dCell" href="#layers.convLstm2dCell"> tf.layers.convLstm2dCell</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1445-L1447" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Cell class for <code>ConvLSTM2D</code>.</p> <p><code>ConvLSTM2DCell</code> is distinct from the <code>ConvRNN2D</code> subclass <code>ConvLSTM2D</code> in that its <code>call</code> method takes the input data of only a single time step and returns the cell's output at the time step, while <code>ConvLSTM2D</code> takes the input data over a number of time steps. 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> filters = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> kernelSize = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> sequenceLength = <span class="hljs-number">1</span>; <span class="hljs-keyword">const</span> size = <span class="hljs-number">5</span>; <span class="hljs-keyword">const</span> channels = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> inputShape = [sequenceLength, size, size, channels]; <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">ones</span>(inputShape); <span class="hljs-keyword">const</span> cell = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">convLstm2dCell</span>({filters, kernelSize}); cell.<span class="hljs-title function_">build</span>(input.<span class="hljs-property">shape</span>); <span class="hljs-keyword">const</span> outputSize = size - kernelSize + <span class="hljs-number">1</span>; <span class="hljs-keyword">const</span> outShape = [sequenceLength, outputSize, outputSize, filters]; <span class="hljs-keyword">const</span> initialH = tf.<span class="hljs-title function_">zeros</span>(outShape); <span class="hljs-keyword">const</span> initialC = tf.<span class="hljs-title function_">zeros</span>(outShape); <span class="hljs-keyword">const</span> [o, h, c] = cell.<span class="hljs-title function_">call</span>([input, initialH, initialC], {}); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Activation function to use. Default: hyperbolic tangent ('tanh'). 
If you pass <code>null</code>, 'linear' activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the <code>recurrent_kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the <code>recurrentKernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Float number between 0 and 1. 
Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">unitForgetBias</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">If <code>true</code>, add 1 to the bias of the forget gate at initialization. Setting it to <code>true</code> will also force <code>biasInitializer = 'zeros'</code>. 
This is recommended in <a target="_blank" rel="noopener" href="http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf">Jozefowicz et al.</a></span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(<a href="#any">tf.any()</a>)</span> <span class="param-docs">Implementation mode, either 1 or 2.</p> <p>Mode 1 will structure its operations as a larger number of smaller dot products and additions.</p> <p>Mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this configuration field.</span> </li> <li class="parameter config-param"> <span class="param-name">filters</span> <span class="param-type">(number)</span> <span class="param-docs">The dimensionality of the output space (i.e. the number of filters in the convolution).</span> </li> <li class="parameter config-param"> <span class="param-name">kernelSize</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimensions of the convolution window. If kernelSize is a number, the convolutional window will be square.</span> </li> <li class="parameter config-param"> <span class="param-name">strides</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The strides of the convolution in each dimension. If strides is a number, strides in both dimensions are equal.</p> <p>Specifying any stride value != 1 is incompatible with specifying any <code>dilationRate</code> value != 1.</span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">('valid'|'same'|'causal')</span> <span class="param-docs">Padding mode.</span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">Format of the data, which determines the ordering of the dimensions in the inputs.</p> <p><code>channels_last</code> corresponds to inputs with shape <code>(batch, ..., channels)</code></p> <p><code>channels_first</code> corresponds to inputs with shape <code>(batch, channels, ...)</code>.</p> <p>Defaults to <code>channels_last</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">dilationRate</span> <span class="param-type">(number|[number]|[number, number])</span> <span class="param-docs">The dilation rate to use for the dilated convolution in each dimension. Should be an integer or array of two or three integers.</p> <p>Currently, specifying any <code>dilationRate</code> value != 1 is incompatible with specifying any <code>strides</code> value != 1.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ConvLSTM2DCell</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.gru" href="#layers.gru"> tf.layers.gru</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1175-L1177" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Gated Recurrent Unit - Cho et al. 2014.</p> <p>This is an <code>RNN</code> layer consisting of one <code>GRUCell</code>. 
However, unlike the underlying <code>GRUCell</code>, the <code>apply</code> method of <code>GRU</code> operates on a sequence of inputs. The shape of the input (not including the first, batch dimension) needs to be at least 2-D, with the first dimension being time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> rnn = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">gru</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = rnn.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the `GRUCell`&#x27;s number of units.</span></code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(number)</span> <span class="param-docs">Implementation mode, either 1 or 2.</p> <p>Mode 1 will structure its operations as a larger number of smaller dot products and additions.</p> <p>Mode 2 will batch them into fewer, larger operations.
These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this configuration field.</span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use.</p> <p>Defaults to hyperbolic tangent (<code>tanh</code>)</p> <p>If you pass <code>null</code>, no activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span 
class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">cell</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>|<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An RNN cell instance. An RNN cell is a class that has:</p> <ul> <li>a <code>call()</code> method, which takes <code>[Tensor, Tensor]</code> as the first input argument. The first item is the input at time t, and the second item is the cell state at time t. The <code>call()</code> method returns <code>[outputAtT, statesAtTPlus1]</code>. The <code>call()</code> method of the cell can also take the argument <code>constants</code>, see section &quot;Note on passing external constants&quot; below. Porting Note: PyKeras overrides the <code>call()</code> signature of RNN cells, which are Layer subtypes, to accept two arguments. tfjs-layers does not do such overriding. Instead we preserve the <code>call()</code> signature, which due to its <code>Tensor|Tensor[]</code> argument and return value is flexible enough to handle the inputs and states.</li> <li>a <code>stateSize</code> attribute. This can be a single integer (single state) in which case it is the size of the recurrent state (which should be the same as the size of the cell output). This can also be an Array of integers (one size per state). In this case, the first entry (<code>stateSize[0]</code>) should be the same as the size of the cell output.
It is also possible for <code>cell</code> to be a list of RNN cell instances, in which case the cells get stacked one after the other in the RNN, implementing an efficient stacked RNN.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">returnSequences</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last output in the output sequence, or the full sequence.</span> </li> <li class="parameter config-param"> <span class="param-name">returnState</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last state in addition to the output.</span> </li> <li class="parameter config-param"> <span class="param-name">goBackwards</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, process the input sequence backwards and return the reversed sequence (default: <code>false</code>).</span> </li> <li class="parameter config-param"> <span class="param-name">stateful</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the last state for each sample at index i in a batch will be used as initial state of the sample of index i in the following batch (default: <code>false</code>).</p> <p>You can set RNN layers to be &quot;stateful&quot;, which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable &quot;statefulness&quot;:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be an array of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>LayersModel.fit()</code>.</li> </ul> <p>To reset the state of your model, call <code>resetStates()</code> on either the specific layer or on the entire model.</span> </li> <li class="parameter config-param"> <span class="param-name">unroll</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences (default: <code>false</code>). Porting Note: tfjs-layers has an imperative backend. RNNs are executed with normal TypeScript control flow. Hence this property is inapplicable and ignored in tfjs-layers.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Dimensionality of the input (integer). This option (or alternatively, the option <code>inputShape</code>) is required when this layer is used as the first layer in a model.</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(number)</span> <span class="param-docs">Length of the input sequences, to be specified when it is constant.
This argument is required if you are going to connect <code>Flatten</code> then <code>Dense</code> layers upstream (without it, the shape of the dense outputs cannot be computed). Note that if the recurrent layer is not the first layer in your model, you would need to specify the input length at the level of the first layer (e.g., via the <code>inputShape</code> option).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GRU</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.gruCell" href="#layers.gruCell"> tf.layers.gruCell</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1224-L1226" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Cell class for <code>GRU</code>.</p> <p><code>GRUCell</code> is distinct from the <code>RNN</code> subclass <code>GRU</code> in that its <code>apply</code> method takes the input data of only a single time step and returns the cell's output at the time step, while <code>GRU</code> takes the input data over a number of time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cell = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">gruCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">2</span>}); <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>]}); <span class="hljs-keyword">const</span> output = cell.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10]: This is the cell&#x27;s output at a single time step. The 1st</span> <span class="hljs-comment">// dimension is the unknown batch size.</span> </code></pre> <p>Instance(s) of <code>GRUCell</code> can be used to construct <code>RNN</code> layers. The most typical use of this workflow is to combine a number of cells into a stacked RNN cell (i.e., <code>StackedRNNCell</code> internally) and use it to create an RNN. 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cells = [ tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">gruCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>}), tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">gruCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>}), ]; <span class="hljs-keyword">const</span> rnn = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">rnn</span>({<span class="hljs-attr">cell</span>: cells, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps and a length-20 vector at each step.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = rnn.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the last `gruCell`&#x27;s number of units.</span> </code></pre> <p>To create an <code>RNN</code> consisting of only <em>one</em> <code>GRUCell</code>, use the <a href="#layers.gru">tf.layers.gru()</a>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(number)</span> <span class="param-docs">Implementation mode, either 1 or 2.</p> <p>Mode 1 will structure its operations as a larger number of smaller dot products and additions.</p> <p>Mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this configuration field.</span> </li> <li class="parameter config-param"> <span class="param-name">resetAfter</span> <span class="param-type">(boolean)</span> <span class="param-docs">GRU convention (whether to apply reset gate after or before matrix multiplication). 
false = &quot;before&quot;, true = &quot;after&quot; (only false is supported).</span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">units: Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use. Default: hyperbolic tangent ('tanh'). If you pass <code>null</code>, 'linear' activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>recurrent_kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to 
the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>recurrentKernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. 
Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GRUCell</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.lstm" href="#layers.lstm"> tf.layers.lstm</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1251-L1253" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Long-Short Term Memory layer - Hochreiter 1997.</p> <p>This is an <code>RNN</code> layer consisting of one <code>LSTMCell</code>. However, unlike the underlying <code>LSTMCell</code>, the <code>apply</code> method of <code>LSTM</code> operates on a sequence of inputs. The shape of the input (not including the first, batch dimension) needs to be at least 2-D, with the first dimension being time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> lstm = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">lstm</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = lstm.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the `LSTMCell`&#x27;s number of units.</span></code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span 
class="param-name">unitForgetBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, add 1 to the bias of the forget gate at initialization. Setting it to <code>true</code> will also force <code>biasInitializer = 'zeros'</code>. This is recommended in <a target="_blank" rel="noopener" href="http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf">Jozefowicz et al.</a></span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(number)</span> <span class="param-docs">Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this config field.</span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use.</p> <p>Defaults to hyperbolic tangent (<code>tanh</code>)</p> <p>If you pass <code>null</code>, no activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span 
class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">cell</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>|<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An RNN cell instance. An RNN cell is a class that has:</p> <ul> <li>a <code>call()</code> method, which takes <code>[Tensor, Tensor]</code> as the first input argument. The first item is the input at time t, and the second item is the cell state at time t. The <code>call()</code> method returns <code>[outputAtT, statesAtTPlus1]</code>. The <code>call()</code> method of the cell can also take the argument <code>constants</code>, see section &quot;Note on passing external constants&quot; below. Porting Note: PyKeras overrides the <code>call()</code> signature of RNN cells, which are Layer subtypes, to accept two arguments. tfjs-layers does not do such overriding. Instead we preserve the <code>call()</code> signature, which due to its <code>Tensor|Tensor[]</code> argument and return value is flexible enough to handle the inputs and states.</li> <li>a <code>stateSize</code> attribute.
This can be a single integer (single state) in which case it is the size of the recurrent state (which should be the same as the size of the cell output). This can also be an Array of integers (one size per state). In this case, the first entry (<code>stateSize[0]</code>) should be the same as the size of the cell output. It is also possible for <code>cell</code> to be a list of RNN cell instances, in which case the cells get stacked one after the other in the RNN, implementing an efficient stacked RNN.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">returnSequences</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last output in the output sequence, or the full sequence.</span> </li> <li class="parameter config-param"> <span class="param-name">returnState</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last state in addition to the output.</span> </li> <li class="parameter config-param"> <span class="param-name">goBackwards</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, process the input sequence backwards and return the reversed sequence (default: <code>false</code>).</span> </li> <li class="parameter config-param"> <span class="param-name">stateful</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the last state for each sample at index i in a batch will be used as initial state of the sample of index i in the following batch (default: <code>false</code>).</p> <p>You can set RNN layers to be &quot;stateful&quot;, which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable &quot;statefulness&quot;:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be an array of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>LayersModel.fit()</code>.</li> </ul> <p>To reset the state of your model, call <code>resetStates()</code> on either the specific layer or on the entire model.</span> </li> <li class="parameter config-param"> <span class="param-name">unroll</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences (default: <code>false</code>). Porting Note: tfjs-layers has an imperative backend. RNNs are executed with normal TypeScript control flow. Hence this property is inapplicable and ignored in tfjs-layers.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Dimensionality of the input (integer).
This option (or alternatively, the option <code>inputShape</code>) is required when this layer is used as the first layer in a model.</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(number)</span> <span class="param-docs">Length of the input sequences, to be specified when it is constant. This argument is required if you are going to connect <code>Flatten</code> then <code>Dense</code> layers upstream (without it, the shape of the dense outputs cannot be computed). Note that if the recurrent layer is not the first layer in your model, you would need to specify the input length at the level of the first layer (e.g., via the <code>inputShape</code> option).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">LSTM</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.lstmCell" href="#layers.lstmCell"> tf.layers.lstmCell</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1300-L1302" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Cell class for <code>LSTM</code>.</p> <p><code>LSTMCell</code> is distinct from the <code>RNN</code> subclass <code>LSTM</code> in that its <code>apply</code> method takes the input data of only a single time step and returns the cell's output at the time step, while <code>LSTM</code> takes the input data over a number of time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cell = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">lstmCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">2</span>}); <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>]}); <span class="hljs-keyword">const</span> output = cell.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10]: This is the cell&#x27;s output at a single time step. The 1st</span> <span class="hljs-comment">// dimension is the unknown batch size.</span> </code></pre> <p>Instance(s) of <code>LSTMCell</code> can be used to construct <code>RNN</code> layers. The most typical use of this workflow is to combine a number of cells into a stacked RNN cell (i.e., <code>StackedRNNCell</code> internally) and use it to create an RNN. 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cells = [ tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">lstmCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>}), tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">lstmCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>}), ]; <span class="hljs-keyword">const</span> rnn = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">rnn</span>({<span class="hljs-attr">cell</span>: cells, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps and a length-20 vector at each step.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = rnn.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the last `lstmCell`&#x27;s number of units.</span> </code></pre> <p>To create an <code>RNN</code> consisting of only <em>one</em> <code>LSTMCell</code>, use the <a href="#layers.lstm">tf.layers.lstm()</a>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">recurrentActivation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use for the recurrent step.</p> <p>Defaults to hard sigmoid (<code>hardSigmoid</code>).</p> <p>If <code>null</code>, no activation is applied.</span> </li> <li class="parameter config-param"> <span class="param-name">unitForgetBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, add 1 to the bias of the forget gate at initialization. Setting it to <code>true</code> will also force <code>biasInitializer = 'zeros'</code>. This is recommended in <a target="_blank" rel="noopener" href="http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf">Jozefowicz et al.</a></span> </li> <li class="parameter config-param"> <span class="param-name">implementation</span> <span class="param-type">(number)</span> <span class="param-docs">Implementation mode, either 1 or 2.</p> <p>Mode 1 will structure its operations as a larger number of smaller dot products and additions.</p> <p>Mode 2 will batch them into fewer, larger operations. 
These modes will have different performance profiles on different hardware and for different applications.</p> <p>Note: For superior performance, TensorFlow.js always uses implementation 2, regardless of the actual value of this configuration field.</span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">units: Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use. Default: hyperbolic tangent ('tanh'). If you pass <code>null</code>, 'linear' activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>recurrent_kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span 
class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>recurrentKernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">LSTMCell</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.rnn" href="#layers.rnn"> tf.layers.rnn</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1509-L1511" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Base class for recurrent layers.</p> <p>Input shape: 3D tensor with shape <code>[batchSize, timeSteps, inputDim]</code>.</p> <p>Output shape:</p> <ul> <li>if <code>returnState</code>, an Array of tensors (i.e., <a href="#class:Tensor">tf.Tensor</a>s). The first tensor is the output. The remaining tensors are the states at the last time step, each with shape <code>[batchSize, units]</code>.</li> <li>if <code>returnSequences</code>, the output will have shape <code>[batchSize, timeSteps, units]</code>.</li> <li>else, the output will have shape <code>[batchSize, units]</code>.</li> </ul> <p>Masking: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an embedding layer with the <code>maskZero</code> parameter set to <code>true</code>.</p> <p>Notes on using statefulness in RNNs: You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable statefulness:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be a tuple of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>fit()</code>.</li> </ul> <p>To reset the states of your model, call <code>.resetStates()</code> on either a specific layer, or on your entire model, as in the sketch below.</p>
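<p>As a minimal, illustrative sketch of the steps above (the choice of an <code>lstm</code> layer and the specific sizes here are assumptions for illustration, not requirements):</p> <pre class="hljs"><code class="hljs language-js">// Stateful recurrent model with a fixed batch size of 32,
// 10 time steps and 100 features per step.
const model = tf.sequential();
model.add(tf.layers.lstm({
  units: 16,
  stateful: true,
  batchInputShape: [32, 10, 100]
}));
model.add(tf.layers.dense({units: 1}));
model.compile({optimizer: 'rmsprop', loss: 'meanSquaredError'});
// Disable shuffling during training so successive batches stay aligned
// (xs and ys are placeholder training tensors):
// await model.fit(xs, ys, {epochs: 5, shuffle: false});
// Reset the accumulated states between independent sequences:
model.resetStates();
</code></pre>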
<p>Note on specifying the initial state of RNNs: You can specify the initial state of RNN layers symbolically by calling them with the option <code>initialState</code>. The value of <code>initialState</code> should be a tensor or list of tensors representing the initial state of the RNN layer.</p> <p>You can specify the initial state of RNN layers numerically by calling <code>resetStates</code> with the argument <code>states</code>. The value of <code>states</code> should be a <a href="#class:Tensor">tf.Tensor</a> or a list of <a href="#class:Tensor">tf.Tensor</a>s representing the initial state of the RNN layer.</p> <p>Note on passing external constants to RNNs: You can pass &quot;external&quot; constants to the cell using the <code>constants</code> keyword argument of the <code>RNN.call</code> method. This requires that the <code>cell.call</code> method accepts the same keyword argument <code>constants</code>. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism.</p>
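<p>As a minimal sketch of the output-shape rules listed above (the cell type and sizes are illustrative assumptions):</p> <pre class="hljs"><code class="hljs language-js">const cell = tf.layers.lstmCell({units: 8});
const rnn = tf.layers.rnn({cell: cell, returnState: true});
// Input: 10 time steps, 20 features per step.
const input = tf.input({shape: [10, 20]});
// With `returnState: true`, apply() returns an Array: the first element is
// the output, the remaining elements are the states at the last time step.
const [output, ...states] = rnn.apply(input);
console.log(JSON.stringify(output.shape));  // [null, 8]
console.log(states.length);                 // 2 states for an LSTM cell.
</code></pre>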
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">cell</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>|<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An RNN cell instance. An RNN cell is a class that has:</p> <ul> <li>a <code>call()</code> method, which takes <code>[Tensor, Tensor]</code> as the first input argument. The first item is the input at time t, and the second item is the cell state at time t. The <code>call()</code> method returns <code>[outputAtT, statesAtTPlus1]</code>. The <code>call()</code> method of the cell can also take the argument <code>constants</code>, see section &quot;Note on passing external constants&quot; below. Porting Note: PyKeras overrides the <code>call()</code> signature of RNN cells, which are Layer subtypes, to accept two arguments. tfjs-layers does not do such overriding. Instead we preserve the <code>call()</code> signature, which due to its <code>Tensor|Tensor[]</code> argument and return value is flexible enough to handle the inputs and states.</li> <li>a <code>stateSize</code> attribute. This can be a single integer (single state) in which case it is the size of the recurrent state (which should be the same as the size of the cell output). This can also be an Array of integers (one size per state). In this case, the first entry (<code>stateSize[0]</code>) should be the same as the size of the cell output. It is also possible for <code>cell</code> to be a list of RNN cell instances, in which case the cells get stacked one after the other in the RNN, implementing an efficient stacked RNN.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">returnSequences</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last output in the output sequence, or the full sequence.</span> </li> <li class="parameter config-param"> <span class="param-name">returnState</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last state in addition to the output.</span> </li> <li class="parameter config-param"> <span class="param-name">goBackwards</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, process the input sequence backwards and return the reversed sequence (default: <code>false</code>).</span> </li> <li class="parameter config-param"> <span class="param-name">stateful</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the last state for each sample at index i in a batch will be used as initial state of the sample of index i in the following batch (default: <code>false</code>).</p> <p>You can set RNN layers to be &quot;stateful&quot;, which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable &quot;statefulness&quot;:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be a tuple of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>LayersModel.fit()</code>.</li> </ul> <p>To reset the state of your model, call <code>resetStates()</code> on either the specific layer or on the entire model.</span> </li> <li class="parameter config-param"> <span class="param-name">unroll</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences (default: <code>false</code>). Porting Note: tfjs-layers has an imperative backend. RNNs are executed with normal TypeScript control flow. Hence this property is inapplicable and ignored in tfjs-layers.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Dimensionality of the input (integer). This option (or alternatively, the option <code>inputShape</code>) is required when this layer is used as the first layer in a model.</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(number)</span> <span class="param-docs">Length of the input sequences, to be specified when it is constant.
This argument is required if you are going to connect <code>Flatten</code> then <code>Dense</code> layers upstream (without it, the shape of the dense outputs cannot be computed). Note that if the recurrent layer is not the first layer in your model, you would need to specify the input length at the level of the first layer (e.g., via the <code>inputShape</code> option).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">RNN</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.simpleRNN" href="#layers.simpleRNN"> tf.layers.simpleRNN</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1328-L1330" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Fully-connected RNN where the output is to be fed back to input.</p> <p>This is an <code>RNN</code> layer consisting of one <code>SimpleRNNCell</code>. However, unlike the underlying <code>SimpleRNNCell</code>, the <code>apply</code> method of <code>SimpleRNN</code> operates on a sequence of inputs. The shape of the input (not including the first, batch dimension) needs to be at least 2-D, with the first dimension being time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> rnn = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">simpleRNN</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = rnn.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the `SimpleRNNCell`&#x27;s number of units.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use.</p> <p>Defaults to hyperbolic tangent (<code>tanh</code>)</p> <p>If you pass <code>null</code>, no activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span 
class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the kernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the recurrentKernel weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. 
Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">cell</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>|<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An RNN cell instance. An RNN cell is a class that has:</p> <ul> <li>a <code>call()</code> method, which takes <code>[Tensor, Tensor]</code> as the first input argument. The first item is the input at time t, and the second item is the cell state at time t. The <code>call()</code> method returns <code>[outputAtT, statesAtTPlus1]</code>. The <code>call()</code> method of the cell can also take the argument <code>constants</code>, see section &quot;Note on passing external constants&quot; below. Porting Note: PyKeras overrides the <code>call()</code> signature of RNN cells, which are Layer subtypes, to accept two arguments. tfjs-layers does not do such overriding. Instead we preserve the <code>call()</code> signature, which due to its <code>Tensor|Tensor[]</code> argument and return value is flexible enough to handle the inputs and states.</li> <li>a <code>stateSize</code> attribute. This can be a single integer (single state) in which case it is the size of the recurrent state (which should be the same as the size of the cell output). This can also be an Array of integers (one size per state). In this case, the first entry (<code>stateSize[0]</code>) should be the same as the size of the cell output. It is also possible for <code>cell</code> to be a list of RNN cell instances, in which case the cells get stacked one after the other in the RNN, implementing an efficient stacked RNN.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">returnSequences</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last output in the output sequence, or the full sequence.</span> </li> <li class="parameter config-param"> <span class="param-name">returnState</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the last state in addition to the output.</span> </li> <li class="parameter config-param"> <span class="param-name">goBackwards</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, process the input sequence backwards and return the reversed sequence (default: <code>false</code>).</span> </li> <li class="parameter config-param"> <span class="param-name">stateful</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the last state for each sample at index i in a batch will be used as initial state of the sample of index i in the following batch (default: <code>false</code>).</p> <p>You can set RNN layers to be &quot;stateful&quot;, which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch.
This assumes a one-to-one mapping between samples in different successive batches.</p> <p>To enable &quot;statefulness&quot;:</p> <ul> <li>specify <code>stateful: true</code> in the layer constructor.</li> <li>specify a fixed batch size for your model, by passing <ul> <li>if sequential model: <code>batchInputShape: [...]</code> to the first layer in your model.</li> <li>else for functional model with 1 or more Input layers: <code>batchShape: [...]</code> to all the first layers in your model. This is the expected shape of your inputs <em>including the batch size</em>. It should be a tuple of integers, e.g., <code>[32, 10, 100]</code>.</li> </ul> </li> <li>specify <code>shuffle: false</code> when calling <code>LayersModel.fit()</code>.</li> </ul> <p>To reset the state of your model, call <code>resetStates()</code> on either the specific layer or on the entire model.</span> </li> <li class="parameter config-param"> <span class="param-name">unroll</span> <span class="param-type">(boolean)</span> <span class="param-docs">If <code>true</code>, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences (default: <code>false</code>). Porting Note: tfjs-layers has an imperative backend. RNNs are executed with normal TypeScript control flow. Hence this property is inapplicable and ignored in tfjs-layers.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDim</span> <span class="param-type">(number)</span> <span class="param-docs">Dimensionality of the input (integer). This option (or alternatively, the option <code>inputShape</code>) is required when this layer is used as the first layer in a model.</span> </li> <li class="parameter config-param"> <span class="param-name">inputLength</span> <span class="param-type">(number)</span> <span class="param-docs">Length of the input sequences, to be specified when it is constant. This argument is required if you are going to connect <code>Flatten</code> then <code>Dense</code> layers upstream (without it, the shape of the dense outputs cannot be computed). Note that if the recurrent layer is not the first layer in your model, you would need to specify the input length at the level of the first layer (e.g., via the <code>inputShape</code> option).</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SimpleRNN</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.simpleRNNCell" href="#layers.simpleRNNCell"> tf.layers.simpleRNNCell</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1377-L1379" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Cell class for <code>SimpleRNN</code>.</p> <p><code>SimpleRNNCell</code> is distinct from the <code>RNN</code> subclass <code>SimpleRNN</code> in that its <code>apply</code> method takes the input data of only a single time step and returns the cell's output at the time step, while <code>SimpleRNN</code> takes the input data over a number of time steps. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cell = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">simpleRNNCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">2</span>}); <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>]}); <span class="hljs-keyword">const</span> output = cell.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10]: This is the cell&#x27;s output at a single time step. 
The 1st</span> <span class="hljs-comment">// dimension is the unknown batch size.</span> </code></pre> <p>Instance(s) of <code>SimpleRNNCell</code> can be used to construct <code>RNN</code> layers. The most typical use of this workflow is to combine a number of cells into a stacked RNN cell (i.e., <code>StackedRNNCell</code> internally) and use it to create an RNN. For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cells = [ tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">simpleRNNCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">4</span>}), tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">simpleRNNCell</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>}), ]; <span class="hljs-keyword">const</span> rnn = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">rnn</span>({<span class="hljs-attr">cell</span>: cells, <span class="hljs-attr">returnSequences</span>: <span class="hljs-literal">true</span>}); <span class="hljs-comment">// Create an input with 10 time steps and a length-20 vector at each step.</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]}); <span class="hljs-keyword">const</span> output = rnn.<span class="hljs-title function_">apply</span>(input); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the</span> <span class="hljs-comment">// same as the sequence length of `input`, due to `returnSequences`: `true`;</span> <span class="hljs-comment">// 3rd dimension is the last `SimpleRNNCell`&#x27;s number of units.</span> </code></pre> <p>To create an <code>RNN</code> consisting of only <em>one</em> <code>SimpleRNNCell</code>, use the <a href="#layers.simpleRNN">tf.layers.simpleRNN()</a>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">units</span> <span class="param-type">(number)</span> <span class="param-docs">units: Positive integer, dimensionality of the output space.</span> </li> <li class="parameter config-param"> <span class="param-name">activation</span> <span class="param-type">('elu'|'hardSigmoid'|'linear'|'relu'|'relu6'| 'selu'|'sigmoid'|'softmax'|'softplus'|'softsign'|'tanh'|'swish'|'mish'|'gelu'|'gelu_new')</span> <span class="param-docs">Activation function to use. Default: hyperbolic tangent ('tanh'). 
If you pass <code>null</code>, 'linear' activation will be applied.</span> </li> <li class="parameter config-param"> <span class="param-name">useBias</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the layer uses a bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the <code>recurrentKernel</code> weights matrix, used for linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">biasInitializer</span> <span class="param-type">('constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'| 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'| 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string|<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">Initializer for the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the <code>recurrent_kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasRegularizer</span> <span class="param-type">('l1l2'|string|Regularizer)</span> <span class="param-docs">Regularizer function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">kernelConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>kernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the <code>recurrentKernel</code> weights matrix.</span> </li> <li class="parameter config-param"> <span class="param-name">biasConstraint</span> <span class="param-type">('maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string|<a 
href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">Constraint function applied to the bias vector.</span> </li> <li class="parameter config-param"> <span class="param-name">dropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</span> </li> <li class="parameter config-param"> <span class="param-name">recurrentDropout</span> <span class="param-type">(number)</span> <span class="param-docs">Float number between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</span> </li> <li class="parameter config-param"> <span class="param-name">dropoutFunc</span> <span class="param-type">(Function)</span> <span class="param-docs">This is added for test DI purpose.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">SimpleRNNCell</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.stackedRNNCells" href="#layers.stackedRNNCells"> tf.layers.stackedRNNCells</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1520-L1522" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Wrapper allowing a stack of RNN cells to behave as a single cell.</p> <p>Used to implement efficient stacked RNNs.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">cells</span> <span class="param-type">(<a href="#class:RNNCell">tf.RNNCell</a>[])</span> <span class="param-docs">An <code>Array</code> of <code>RNNCell</code> instances.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. 
Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">StackedRNNCells</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Wrapper" href="#Layers-Wrapper" class="symbol-link"> Layers / Wrapper </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.bidirectional" href="#layers.bidirectional"> tf.layers.bidirectional</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1527-L1529" target=_blank>Source</a> </span> </div> <div class="documentation"></div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">layer</span> <span class="param-type">(RNN)</span> <span class="param-docs">The instance of an <code>RNN</code> layer to be wrapped.</span> </li> <li class="parameter config-param"> <span class="param-name">mergeMode</span> <span class="param-type">('sum'|'mul'|'concat'|'ave')</span> <span class="param-docs">Mode by which outputs of the forward and backward RNNs are combined. If <code>null</code> or <code>undefined</code>, the output will not be combined, they will be returned as an <code>Array</code>.</p> <p>If <code>undefined</code> (i.e., not provided), defaults to <code>'concat'</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Bidirectional</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.timeDistributed" href="#layers.timeDistributed"> tf.layers.timeDistributed</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1577-L1579" target=_blank>Source</a> </span> </div> <div class="documentation"><p>This wrapper applies a layer to every temporal slice of an input.</p> <p>The input should be at least 3D, and the dimension of the index <code>1</code> will be considered to be the temporal dimension.</p> <p>Consider a batch of 32 samples, where each sample is a sequence of 10 vectors of 16 dimensions. 
The batch input shape of the layer is then <code>[32, 10, 16]</code>, and the <code>inputShape</code>, not including the sample dimension, is <code>[10, 16]</code>.</p> <p>You can then use <code>TimeDistributed</code> to apply a <code>Dense</code> layer to each of the 10 timesteps, independently:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">timeDistributed</span>({ <span class="hljs-attr">layer</span>: tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">8</span>}), <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">16</span>], })); <span class="hljs-comment">// Now model.outputShape = [null, 10, 8].</span> <span class="hljs-comment">// The output will then have shape `[32, 10, 8]`.</span> <span class="hljs-comment">// In subsequent layers, there is no need for `inputShape`:</span> model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">timeDistributed</span>({<span class="hljs-attr">layer</span>: tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">32</span>})})); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(model.<span class="hljs-property">outputs</span>[<span class="hljs-number">0</span>].<span class="hljs-property">shape</span>)); <span class="hljs-comment">// Now model.outputShape = [null, 10, 32].</span> </code></pre> <p>The output will then have shape <code>[32, 10, 32]</code>.</p> <p><code>TimeDistributed</code> can be used with arbitrary layers, not just <code>Dense</code>, for instance a <code>Conv2D</code> layer.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">timeDistributed</span>({ <span class="hljs-attr">layer</span>: tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">conv2d</span>({<span class="hljs-attr">filters</span>: <span class="hljs-number">64</span>, <span class="hljs-attr">kernelSize</span>: [<span class="hljs-number">3</span>, <span class="hljs-number">3</span>]}), <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">10</span>, <span class="hljs-number">299</span>, <span class="hljs-number">299</span>, <span class="hljs-number">3</span>], })); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(model.<span class="hljs-property">outputs</span>[<span class="hljs-number">0</span>].<span class="hljs-property">shape</span>)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> 
<span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">layer</span> <span class="param-type">(<a href="#class:layers.Layer">tf.layers.Layer</a>)</span> <span class="param-docs">The layer to be wrapped.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">TimeDistributed</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Classes" href="#Layers-Classes" class="symbol-link"> Layers / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:layers.Layer" href="#class:layers.Layer">tf.layers.Layer</a> <span class="signature"> <span>extends serialization.Serializable</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L405-L1579" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A layer is a grouping of operations and weights that can be composed to create a <a href="#class:LayersModel">tf.LayersModel</a>.</p> <p>Layers are constructed by using the functions under the <a href="#Layers-Basic">tf.layers</a> namespace.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.apply" href="#tf.layers.Layer.apply"> apply</a> <span class="signature">(inputs, kwargs?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L947-L1082" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Builds or executes a <code>Layer</code>'s logic.</p> <p>When called with <a href="#class:Tensor">tf.Tensor</a>(s), execute the <code>Layer</code>'s computation and return Tensor(s). For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> denseLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({ <span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">kernelInitializer</span>: <span class="hljs-string">&#x27;zeros&#x27;</span>, <span class="hljs-attr">useBias</span>: <span class="hljs-literal">false</span> }); <span class="hljs-comment">// Invoke the layer&#x27;s apply() method with a [tf.Tensor](#class:Tensor) (with concrete</span> <span class="hljs-comment">// numeric values).</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">ones</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> output = denseLayer.<span class="hljs-title function_">apply</span>(input); <span class="hljs-comment">// The output&#x27;s value is expected to be [[0], [0]], due to the fact that</span> <span class="hljs-comment">// the dense layer has a kernel initialized to all-zeros and does not have</span> <span class="hljs-comment">// a bias.</span> output.<span class="hljs-title function_">print</span>(); </code></pre> <p>When called with <a href="#class:SymbolicTensor">tf.SymbolicTensor</a>(s), this will prepare the layer for future execution. This entails internal book-keeping on shapes of expected Tensors, wiring layers together, and initializing weights.</p> <p>Calling <code>apply</code> with <a href="#class:SymbolicTensor">tf.SymbolicTensor</a>s are typically used during the building of non-<a href="#class:Sequential">tf.Sequential</a> models. 
For example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> flattenLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">flatten</span>(); <span class="hljs-keyword">const</span> denseLayer = tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>}); <span class="hljs-comment">// Use tf.layers.input() to obtain a SymbolicTensor as input to apply().</span> <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">input</span>({<span class="hljs-attr">shape</span>: [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]}); <span class="hljs-keyword">const</span> output1 = flattenLayer.<span class="hljs-title function_">apply</span>(input); <span class="hljs-comment">// output1.shape is [null, 4]. The first dimension is the undetermined</span> <span class="hljs-comment">// batch size. The second dimension comes from flattening the [2, 2]</span> <span class="hljs-comment">// shape.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output1.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// The output SymbolicTensor of the flatten layer can be used to call</span> <span class="hljs-comment">// the apply() of the dense layer:</span> <span class="hljs-keyword">const</span> output2 = denseLayer.<span class="hljs-title function_">apply</span>(output1); <span class="hljs-comment">// output2.shape is [null, 1]. The first dimension is the undetermined</span> <span class="hljs-comment">// batch size. 
The second dimension matches the number of units of the</span> <span class="hljs-comment">// dense layer.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(output2.<span class="hljs-property">shape</span>)); <span class="hljs-comment">// The input and output can be used to construct a model that consists</span> <span class="hljs-comment">// of the flatten and dense layers.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">model</span>({<span class="hljs-attr">inputs</span>: input, <span class="hljs-attr">outputs</span>: output2}); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>[])</span> <span class="param-docs">a <a href="#class:Tensor">tf.Tensor</a> or <a href="#class:SymbolicTensor">tf.SymbolicTensor</a> or an Array of them.</span> </li> <li class="parameter"> <span class="param-name">kwargs</span> <span class="param-type">(Kwargs)</span> <span class="param-docs">Additional keyword arguments to be passed to <code>call()</code>.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>|<a href="#class:SymbolicTensor">tf.SymbolicTensor</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.countParams" href="#tf.layers.Layer.countParams"> countParams</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1171-L1179" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Counts the total number of numbers (e.g., float32, int32) in the weights.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">number</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.build" href="#tf.layers.Layer.build"> build</a> <span class="signature">(inputShape)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1192-L1194" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates the layer weights.</p> <p>Must be implemented on all layers that have weights.</p> <p>Called when apply() is called to construct the weights.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[]|(null | number)[][])</span> <span class="param-docs">A <code>Shape</code> or array of <code>Shape</code> (unused).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol 
function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.getWeights" href="#tf.layers.Layer.getWeights"> getWeights</a> <span class="signature">(trainableOnly?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1204-L1206" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the current values of the weights of the layer.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">trainableOnly</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to get the values of only trainable weights.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.setWeights" href="#tf.layers.Layer.setWeights"> setWeights</a> <span class="signature">(weights)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1220-L1252" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Sets the weights of the layer, from Tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">a list of Tensors. The number of arrays and their shape must match number of the dimensions of the weights of the layer (i.e. 
it should match the output of <code>getWeights</code>).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.addWeight" href="#tf.layers.Layer.addWeight"> addWeight</a> <span class="signature">(name, shape, dtype?, initializer?, regularizer?, trainable?, constraint?, getInitializerFunc?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1269-L1305" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Adds a weight variable to the layer.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name of the new weight variable.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">The shape of the weight.</span> </li> <li class="parameter"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The dtype of the weight.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">initializer</span> <span class="param-type">(<a href="#class:initializers.Initializer">tf.initializers.Initializer</a>)</span> <span class="param-docs">An initializer instance.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">regularizer</span> <span class="param-type">(Regularizer)</span> <span class="param-docs">A regularizer instance.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weight should be trained via backprop or not (assuming that the layer itself is also trainable).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">constraint</span> <span class="param-type">(<a href="#class:constraints.Constraint">tf.constraints.Constraint</a>)</span> <span class="param-docs">An optional trainable.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">getInitializerFunc</span> <span class="param-type">(Function)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">LayerVariable</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.addLoss" href="#tf.layers.Layer.addLoss"> addLoss</a> <span class="signature">(losses)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1329-L1338" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Add losses to the layer.</p> <p>The loss may potentially be conditional on some inputs tensors, for instance activity losses are conditional on the layer's inputs.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span 
class="param-name">losses</span> <span class="param-type">(RegularizerFn|RegularizerFn[])</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.computeOutputShape" href="#tf.layers.Layer.computeOutputShape"> computeOutputShape</a> <span class="signature">(inputShape)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1351-L1353" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the output shape of the layer.</p> <p>Assumes that the layer will be built to match that input shape provided.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[]|(null | number)[][])</span> <span class="param-docs">A shape (tuple of integers) or a list of shape tuples (one per output tensor of the layer). Shape tuples can include null for free dimensions, instead of an integer.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">(null | number)[]|(null | number)[][]</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.getConfig" href="#tf.layers.Layer.getConfig"> getConfig</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1499-L1509" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the config of the layer.</p> <p>A layer config is a TS dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration.</p> <p>The config of a layer does not include connectivity information, nor the layer class name. These are handled by 'Container' (one layer of abstraction above).</p> <p>Porting Note: The TS dictionary follows TS naming standards for keys, and uses tfjs-layers type-safe Enums. Serialization methods should use a helper function to convert to the pythonic storage standard. (see serialization_utils.convertTsToPythonic)</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">serialization.ConfigDict</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.layers.Layer.dispose" href="#tf.layers.Layer.dispose"> dispose</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/engine/topology.ts#L1557-L1578" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Attempt to dispose layer's weights.</p> <p>This method decreases the reference count of the Layer object by 1.</p> <p>A Layer is reference-counted. 
Its reference count is incremented by 1 the first time its <code>apply()</code> method is called and when it becomes a part of a new <code>Node</code> (through calling the <code>apply()</code> method on a <a href="#class:SymbolicTensor">tf.SymbolicTensor</a>).</p> <p>If the reference count of a Layer becomes 0, all the weights will be disposed and the underlying memory (e.g., the textures allocated in WebGL) will be freed.</p> <p>Note: If the reference count is greater than 0 after the decrement, the weights of the Layer will <em>not</em> be disposed.</p> <p>After a Layer is disposed, it cannot be used in calls such as <code>apply()</code>, <code>getWeights()</code> or <code>setWeights()</code> anymore.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">DisposeResult</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:RNNCell" href="#class:RNNCell">tf.RNNCell</a> <span class="signature"> <span>extends <a href="#class:layers.Layer">tf.layers.Layer</a></span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/layers/recurrent.ts#L861-L871" target=_blank>Source</a> </span> </div> <div class="documentation"><p>An RNNCell layer.</p> </div> <div class="method-list"> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Inputs" href="#Layers-Inputs" class="symbol-link"> Layers / Inputs </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.inputLayer" href="#layers.inputLayer"> tf.layers.inputLayer</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L72-L74" target=_blank>Source</a> </span> </div> <div class="documentation"><p>An input layer is an entry point into a <a href="#class:LayersModel">tf.LayersModel</a>.</p> <p><code>InputLayer</code> is generated automatically for <a href="#class:Sequential">tf.Sequential</a> models by specifying the <code>inputShape</code> or <code>batchInputShape</code> for the first layer. It should not be specified explicitly. However, it can be useful sometimes, e.g., when constructing a sequential model from a subset of another sequential model's layers.
Like the code snippet below shows.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Define a model which simply adds two inputs.</span> <span class="hljs-keyword">const</span> model1 = tf.<span class="hljs-title function_">sequential</span>(); model1.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">inputShape</span>: [<span class="hljs-number">4</span>], <span class="hljs-attr">units</span>: <span class="hljs-number">3</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;relu&#x27;</span>})); model1.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({<span class="hljs-attr">units</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;sigmoid&#x27;</span>})); model1.<span class="hljs-title function_">summary</span>(); model1.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>])).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// Construct another model, reusing the second layer of `model1` while</span> <span class="hljs-comment">// not using the first layer of `model1`. Note that you cannot add the second</span> <span class="hljs-comment">// layer of `model` directly as the first layer of the new sequential model,</span> <span class="hljs-comment">// because doing so will lead to an error related to the fact that the layer</span> <span class="hljs-comment">// is not an input layer. 
Instead, you need to create an `inputLayer` and add</span> <span class="hljs-comment">// it to the new sequential model before adding the reused layer.</span> <span class="hljs-keyword">const</span> model2 = tf.<span class="hljs-title function_">sequential</span>(); <span class="hljs-comment">// Use an inputShape that matches the input shape of `model1`&#x27;s second</span> <span class="hljs-comment">// layer.</span> model2.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">inputLayer</span>({<span class="hljs-attr">inputShape</span>: [<span class="hljs-number">3</span>]})); model2.<span class="hljs-title function_">add</span>(model1.<span class="hljs-property">layers</span>[<span class="hljs-number">1</span>]); model2.<span class="hljs-title function_">summary</span>(); model2.<span class="hljs-title function_">predict</span>(tf.<span class="hljs-title function_">zeros</span>([<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">Input shape, not including the batch axis.</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">Optional input batch size (integer or null).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">Batch input shape, including the batch axis.</span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Datatype of the input.</span> </li> <li class="parameter config-param"> <span class="param-name">sparse</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the placeholder created is meant to be sparse.</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name of the layer.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">InputLayer</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Padding" href="#Layers-Padding" class="symbol-link"> Layers / Padding </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.zeroPadding2d" href="#layers.zeroPadding2d"> tf.layers.zeroPadding2d</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L928-L930" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Zero-padding layer for 2D input (e.g., image).</p> <p>This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor.</p> <p>Input shape: 4D tensor with shape:</p> <ul> <li>If 
<code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, rows, cols, channels]</code></li> <li>If <code>dataFormat</code> is <code>&quot;channelsFirst&quot;</code>: <code>[batch, channels, rows, cols]</code>.</li> </ul> <p>Output shape: 4D tensor with shape:</p> <ul> <li>If <code>dataFormat</code> is <code>&quot;channelsLast&quot;</code>: <code>[batch, paddedRows, paddedCols, channels]</code></li> <li>If <code>dataFormat</code> is <code>&quot;channelsFirst&quot;</code>: <code>[batch, channels, paddedRows, paddedCols]</code>.</li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">padding</span> <span class="param-type">(number|[number, number]|[[number, number], [number, number]])</span> <span class="param-docs">Integer, or <code>Array</code> of 2 integers, or <code>Array</code> of 2 <code>Array</code>s, each of which is an <code>Array</code> of 2 integers.</p> <ul> <li>If integer, the same symmetric padding is applied to width and height.</li> <li>If <code>Array</code> of 2 integers, interpreted as two different symmetric values for height and width: <code>[symmetricHeightPad, symmetricWidthPad]</code>.</li> <li>If <code>Array</code> of 2 <code>Array</code>s, interpreted as: <code>[[topPad, bottomPad], [leftPad, rightPad]]</code>.</li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">dataFormat</span> <span class="param-type">('channelsFirst'|'channelsLast')</span> <span class="param-docs">One of <code>'channelsLast'</code> (default) and <code>'channelsFirst'</code>.</p> <p>The ordering of the dimensions in the inputs. <code>channelsLast</code> corresponds to inputs with shape <code>[batch, height, width, channels]</code> while <code>channelsFirst</code> corresponds to inputs with shape <code>[batch, channels, height, width]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used.
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">ZeroPadding2D</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Noise" href="#Layers-Noise" class="symbol-link"> Layers / Noise </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.alphaDropout" href="#layers.alphaDropout"> tf.layers.alphaDropout</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1675-L1677" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Applies Alpha Dropout to the input.</p> <p>As it is a regularization layer, it is only active at training time.</p> <p>Alpha Dropout is a <code>Dropout</code> that keeps mean and variance of inputs to their original values, in order to ensure the self-normalizing property even after this dropout. Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value.</p> <p>Arguments:</p> <ul> <li><code>rate</code>: float, drop probability (as with <code>Dropout</code>). The multiplicative noise will have standard deviation <code>sqrt(rate / (1 - rate))</code>.</li> <li><code>noise_shape</code>: A 1-D <code>Tensor</code> of type <code>int32</code>, representing the shape for randomly generated keep/drop flags.</li> </ul> <p>Input shape: Arbitrary. 
Use the keyword argument <code>inputShape</code> (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="https://arxiv.org/abs/1706.02515">Self-Normalizing Neural Networks</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">drop probability.</span> </li> <li class="parameter config-param"> <span class="param-name">noiseShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">A 1-D <code>Tensor</code> of type <code>int32</code>, representing the shape for randomly generated keep/drop flags.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AlphaDropout</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.gaussianDropout" href="#layers.gaussianDropout"> tf.layers.gaussianDropout</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1640-L1642" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Apply multiplicative 1-centered Gaussian noise.</p> <p>As it is a regularization layer, it is only active at training time.</p> <p>Arguments:</p> <ul> <li><code>rate</code>: float, drop probability (as with <code>Dropout</code>). The multiplicative noise will have standard deviation <code>sqrt(rate / (1 - rate))</code>.</li> </ul> <p>Input shape: Arbitrary. Use the keyword argument <code>inputShape</code> (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as input.</p> <p>References:</p> <ul> <li><a target="_blank" rel="noopener" href="http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf">Dropout: A Simple Way to Prevent Neural Networks from Overfitting</a></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">drop probability.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GaussianDropout</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.gaussianNoise" href="#layers.gaussianNoise"> tf.layers.gaussianNoise</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1612-L1614" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Apply additive zero-centered Gaussian noise.</p> <p>As it is a regularization layer, it is only active at training time.</p> <p>This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian noise (GN) is a natural choice as a corruption process for real-valued inputs.</p> <p>Arguments:</p> <ul> <li><code>stddev</code>: float, standard deviation of the noise distribution.</li> </ul> <p>Input shape: Arbitrary. Use the keyword argument <code>inputShape</code> (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as input.</p>
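<p>For illustration, a minimal usage sketch (the model structure and the values here are arbitrary):</p> <pre class="hljs"><code class="hljs language-js">const model = tf.sequential();
// Add zero-centered Gaussian noise (stddev 0.1) to the 4-feature input.
model.add(tf.layers.gaussianNoise({stddev: 0.1, inputShape: [4]}));
model.add(tf.layers.dense({units: 1}));
// The noise is only applied during training; predict() runs in inference
// mode, so it sees the inputs unchanged.
model.predict(tf.ones([2, 4])).print();
</code></pre>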
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">stddev</span> <span class="param-type">(number)</span> <span class="param-docs">Standard Deviation.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">GaussianNoise</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Mask" href="#Layers-Mask" class="symbol-link"> Layers / Mask </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.masking" href="#layers.masking"> tf.layers.masking</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1702-L1704" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Masks a sequence by using a mask value to skip timesteps.</p> <p>If all features for a given sample timestep are equal to <code>maskValue</code>, then the sample timestep will be masked (skipped) in all downstream layers (as long as they support masking).</p> <p>If any downstream layer does not support masking yet receives such an input mask, an exception will be raised.</p> <p>Arguments:</p> <ul> <li><code>maskValue</code>: Either <code>null</code> or the mask value to skip.</li> </ul> <p>Input shape: Arbitrary. Use the keyword argument <code>inputShape</code> (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.</p> <p>Output shape: Same shape as input.</p>
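<p>A minimal sketch of the behavior, assuming a batch of one sequence with two timesteps of three features each:</p> <pre class="hljs"><code class="hljs language-js">// The second timestep is all zeros, so with maskValue 0 it will be
// masked (skipped) by downstream layers that support masking.
const maskingLayer = tf.layers.masking({maskValue: 0});
const sequences = tf.tensor3d([[[1, 2, 3], [0, 0, 0]]]);
const output = maskingLayer.apply(sequences);
// The values pass through with the same shape; the mask itself is carried
// along for subsequent layers that support masking (e.g. an LSTM).
output.print();
</code></pre>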
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">maskValue</span> <span class="param-type">(number)</span> <span class="param-docs">Masking Value. Defaults to <code>0.0</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support.
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Masking</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Rescaling" href="#Layers-Rescaling" class="symbol-link"> Layers / Rescaling </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.rescaling" href="#layers.rescaling"> tf.layers.rescaling</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1733-L1735" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A preprocessing layer which rescales input values to a new range.</p> <p>This layer rescales every value of an input (often an image) by multiplying by <code>scale</code> and adding <code>offset</code>.</p> <p>For instance:</p> <ol> <li>To rescale an input in the <code>[0, 255]</code> range to be in the <code>[0, 1]</code> range, you would pass <code>scale=1/255</code>.</li> <li>To rescale an input in the <code>[0, 255]</code> range to be in the <code>[-1, 1]</code> range, you would pass <code>scale=1/127.5, offset=-1</code>.</li> </ol> <p>The rescaling is applied both during training and inference. Inputs can be of integer or floating point dtype, and by default the layer will output floats.</p> <p>Arguments:</p> <ul> <li><code>scale</code>: Float, the scale to apply to the inputs.</li> <li><code>offset</code>: Float, the offset to apply to the inputs.</li> </ul> <p>Input shape: Arbitrary.</p> <p>Output shape: Same as input.</p>
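<p>For example, a minimal sketch that maps pixel values from <code>[0, 255]</code> to <code>[0, 1]</code>:</p> <pre class="hljs"><code class="hljs language-js">const rescale = tf.layers.rescaling({scale: 1 / 255, offset: 0});
// Expected output: [[0, 0.5, 1]]
rescale.apply(tf.tensor2d([[0, 127.5, 255]])).print();
</code></pre>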
</div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">scale</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">offset</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Rescaling</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-CenterCrop" href="#Layers-CenterCrop" class="symbol-link"> Layers / CenterCrop </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.centerCrop" href="#layers.centerCrop"> tf.layers.centerCrop</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1766-L1768" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A preprocessing layer which center crops images.</p> <p>This layer crops the central portion of the images to a target size. If an image is smaller than the target size, it will be resized and cropped so as to return the largest possible window in the image that matches the target aspect ratio.</p> <p>Input pixel values can be of any range (e.g. <code>[0., 1.)</code> or <code>[0, 255]</code>) and of integer or floating point dtype.</p> <p>If the input height/width is even and the target height/width is odd (or inversely), the input image is left-padded by 1 pixel.</p> <p>Arguments: <code>height</code>: Integer, the height of the output shape.
<code>width</code>: Integer, the width of the output shape.</p> <p>Input shape: 3D (unbatched) or 4D (batched) tensor with shape: <code>(..., height, width, channels)</code>, in <code>channelsLast</code> format.</p> <p>Output shape: 3D (unbatched) or 4D (batched) tensor with shape: <code>(..., targetHeight, targetWidth, channels)</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">height</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">width</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">CenterCrop</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-Resizing" href="#Layers-Resizing" class="symbol-link"> Layers / Resizing </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.resizing" href="#layers.resizing"> tf.layers.resizing</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1792-L1794" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A preprocessing layer which resizes images. This layer resizes an image input to a target height and width. The input should be a 4D (batched) or 3D (unbatched) tensor in <code>&quot;channels_last&quot;</code> format. Input pixel values can be of any range (e.g. <code>[0., 1.)</code> or <code>[0, 255]</code>) and of integer or floating point dtype. By default, the layer will output floats.</p> <p>Arguments:</p> <ul> <li><code>height</code>: number, the height for the output tensor.</li> <li><code>width</code>: number, the width for the output tensor.</li> <li><code>interpolation</code>: string, the method for image resizing interpolation.</li> <li><code>cropToAspectRatio</code>: boolean, whether to keep the image aspect ratio.</li> </ul> <p>Input shape: Arbitrary.</p> <p>Output shape: height, width, num channels.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">height</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">width</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">interpolation</span> <span class="param-type">(InterpolationType)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">cropToAspectRatio</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. 
This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Resizing</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-CategoryEncoding" href="#Layers-CategoryEncoding" class="symbol-link"> Layers / CategoryEncoding </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.categoryEncoding" href="#layers.categoryEncoding"> tf.layers.categoryEncoding</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1839-L1841" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A preprocessing layer which encodes integer features.</p> <p>This layer provides options for condensing data into a categorical encoding when the total number of tokens is known in advance. It accepts integer values as inputs, and it outputs a dense representation of those inputs.</p> <p>Arguments:</p> <p>numTokens: The total number of tokens the layer should support. All inputs to the layer must be integers in the range <code>0 &lt;= value &lt; numTokens</code>, or an error will be thrown.</p> <p>outputMode: Specification for the output of the layer. Defaults to <code>multiHot</code>. Values can be <code>oneHot</code>, <code>multiHot</code> or <code>count</code>, configuring the layer as follows:</p> <pre><code>oneHot: Encodes each individual element in the input into an array of `numTokens` size, containing a 1 at the element index. If the last dimension is size 1, will encode on that dimension. If the last dimension is not size 1, will append a new dimension for the encoded output. 
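(Illustrative example, not part of the original description: with `numTokens = 4`, a oneHot input of `[0, 1, 2]` encodes to `[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]`.)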
multiHot: Encodes each sample in the input into a single array of `numTokens` size, containing a 1 for each vocabulary term present in the sample. Treats the last dimension as the sample dimension: if the input shape is `(..., sampleLength)`, the output shape will be `(..., numTokens)`. count: Like `multiHot`, but the int array contains a count of the number of times the token at that index appeared in the sample. </code></pre> <p>For all output modes, only outputs up to rank 2 are currently supported. Call arguments: inputs: A 1D or 2D tensor of integer inputs. countWeights: A tensor in the same shape as <code>inputs</code> indicating the weight for each sample value when summing up in <code>count</code> mode. Not used in <code>multiHot</code> or <code>oneHot</code> modes.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">numTokens</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">outputMode</span> <span class="param-type">(OutputMode)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. 
Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">CategoryEncoding</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Layers-RandomWidth" href="#Layers-RandomWidth" class="symbol-link"> Layers / RandomWidth </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="layers.randomWidth" href="#layers.randomWidth"> tf.layers.randomWidth</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_layers.ts#L1887-L1889" target=_blank>Source</a> </span> </div> <div class="documentation"><p>A preprocessing layer which randomly varies image width during training.</p> <p>This layer randomly adjusts the width of a batch of images by a random factor.</p> <p>The input should be a 3D (unbatched) or 4D (batched) tensor in the <code>&quot;channels_last&quot;</code> image data format. Input pixel values can be of any range (e.g. <code>[0., 1.)</code> or <code>[0, 255]</code>) and of integer or floating point dtype. By default, the layer will output floats. By default, this layer is inactive during inference. For an overview and full list of preprocessing layers, see the preprocessing <a target="_blank" rel="noopener" href="https://www.tensorflow.org/guide/keras/preprocessing_layers">guide</a>.</p> <p>Arguments:</p> <p>factor: A positive float (fraction of original width), or a tuple of size 2 representing the lower and upper bounds for resizing horizontally. When represented as a single float, this value is used for both the upper and lower bound. For instance, <code>factor=(0.2, 0.3)</code> results in an output with width changed by a random amount in the range <code>[20%, 30%]</code>. <code>factor=(-0.2, 0.3)</code> results in an output with width changed by a random amount in the range <code>[-20%, +30%]</code>. <code>factor=0.2</code> results in an output with width changed by a random amount in the range <code>[-20%, +20%]</code>. interpolation: String, the interpolation method. Defaults to <code>bilinear</code>. Supports <code>&quot;bilinear&quot;</code>, <code>&quot;nearest&quot;</code>. The tf methods <code>&quot;bicubic&quot;</code>, <code>&quot;area&quot;</code>, <code>&quot;lanczos3&quot;</code>, <code>&quot;lanczos5&quot;</code>, <code>&quot;gaussian&quot;</code>, <code>&quot;mitchellcubic&quot;</code> are unimplemented in tfjs. seed: Integer. Used to create a random seed.</p> <p>Input shape: 3D (unbatched) or 4D (batched) tensor with shape: <code>(..., height, width, channels)</code>, in <code>&quot;channels_last&quot;</code> format. 
Output shape: 3D (unbatched) or 4D (batched) tensor with shape: <code>(..., height, random_width, channels)</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">factor</span> <span class="param-type">(number | [number, number])</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">interpolation</span> <span class="param-type">(InterpolationType)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">autoVectorize</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">inputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchInputShape</span> <span class="param-type">((null | number)[])</span> <span class="param-docs">If defined, will be used to create an input layer to insert before this layer. If both <code>inputShape</code> and <code>batchInputShape</code> are defined, <code>batchInputShape</code> will be used. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">If <code>inputShape</code> is specified and <code>batchInputShape</code> is <em>not</em> specified, <code>batchSize</code> is used to construct the <code>batchInputShape</code>: <code>[batchSize, ...inputShape]</code></span> </li> <li class="parameter config-param"> <span class="param-name">dtype</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">The data-type for this layer. Defaults to 'float32'. This argument is only applicable to input layers (the first layer of a model).</span> </li> <li class="parameter config-param"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs">Name for this layer.</span> </li> <li class="parameter config-param"> <span class="param-name">trainable</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether the weights of this layer are updatable by <code>fit</code>. Defaults to true.</span> </li> <li class="parameter config-param"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">Initial weight values of the layer.</span> </li> <li class="parameter config-param"> <span class="param-name">inputDType</span> <span class="param-type">('float32'|'int32'|'bool'|'complex64'|'string')</span> <span class="param-docs">Legacy support. 
Do not use for new code.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">RandomWidth</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Operations" href="#Operations" class="symbol-link">Operations</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Arithmetic" href="#Operations-Arithmetic" class="symbol-link"> Operations / Arithmetic </a> </div> <div class="description"> <p>To perform mathematical computation on Tensors, we use operations. Tensors are immutable, so all operations always return new Tensors and never modify input Tensors.</p> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="add" href="#add"> tf.add</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/add.ts#L50-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Adds two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A + B. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">20</span>, <span class="hljs-number">30</span>, <span class="hljs-number">40</span>]); a.<span class="hljs-title function_">add</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.add(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast add a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">20</span>, <span class="hljs-number">30</span>, <span class="hljs-number">40</span>]); a.<span class="hljs-title function_">add</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.add(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first <a href="#class:Tensor">tf.Tensor</a> to add.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second <a href="#class:Tensor">tf.Tensor</a> to add. 
Must have the same type as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sub" href="#sub"> tf.sub</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sub.ts#L50-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Subtracts two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A - B. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">20</span>, <span class="hljs-number">30</span>, <span class="hljs-number">40</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">sub</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sub(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast subtract a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">20</span>, <span class="hljs-number">30</span>, <span class="hljs-number">40</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">sub</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sub(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first <a href="#class:Tensor">tf.Tensor</a> to subtract from.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second <a href="#class:Tensor">tf.Tensor</a> to be subtracted. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="mul" href="#mul"> tf.mul</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/mul.ts#L53-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Multiplies two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A * B. Supports broadcasting.</p> <p>We also expose <code>tf.mulStrict</code> which has the same signature as this op and asserts that <code>a</code> and <code>b</code> are the same shape (does not broadcast).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>]); a.<span class="hljs-title function_">mul</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mul(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast mul a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">mul</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mul(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor to multiply.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor to multiply. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="div" href="#div"> tf.div</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/div.ts#L53-L68" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Divides two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A / B. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">9</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">div</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.div(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast div a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">2</span>); a.<span class="hljs-title function_">div</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.div(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor as the numerator.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor as the denominator. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="addN" href="#addN"> tf.addN</a> <span class="signature">(tensors)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/add_n.ts#L40-L70" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Adds a list of <a href="#class:Tensor">tf.Tensor</a>s element-wise, each with the same shape and dtype.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); tf.<span class="hljs-title function_">addN</span>([a, b, c]).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">tensors</span> <span class="param-type">(Array)</span> <span class="param-docs">A list of tensors with the same shape and dtype.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="divNoNan" href="#divNoNan"> tf.divNoNan</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/div_no_nan.ts#L59-L70" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Divides two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A / B. Supports broadcasting. 
Return 0 if denominator is 0.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">9</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]); a.<span class="hljs-title function_">divNoNan</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.divNoNan(a, b)</span> a.<span class="hljs-title function_">divNoNan</span>(c).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.divNoNan(a, c)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast div a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">2</span>); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">0</span>); a.<span class="hljs-title function_">divNoNan</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.divNoNan(a, b)</span> a.<span class="hljs-title function_">divNoNan</span>(c).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.divNoNan(a, c)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor as the numerator.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor as the denominator. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="floorDiv" href="#floorDiv"> tf.floorDiv</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/floorDiv.ts#L54-L63" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Divides two <a href="#class:Tensor">tf.Tensor</a>s element-wise, A / B. Supports broadcasting. The result is rounded with the floor function.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">9</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">floorDiv</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.floorDiv(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast floorDiv a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">2</span>); a.<span class="hljs-title function_">floorDiv</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.floorDiv(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor as the numerator.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor as the denominator. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="maximum" href="#maximum"> tf.maximum</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/maximum.ts#L57-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the max of a and b (<code>a &gt; b ? a : b</code>) element-wise. Supports broadcasting.</p> <p>We also expose <code>tf.maximumStrict</code> which has the same signature as this op and asserts that <code>a</code> and <code>b</code> are the same shape (does not broadcast).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">9</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">maximum</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.maximum(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast maximum a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">maximum</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.maximum(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor. 
Must have the same type as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="minimum" href="#minimum"> tf.minimum</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/minimum.ts#L57-L73" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the min of a and b (<code>a &lt; b ? a : b</code>) element-wise. Supports broadcasting.</p> <p>We also expose <code>minimumStrict</code> which has the same signature as this op and asserts that <code>a</code> and <code>b</code> are the same shape (does not broadcast).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">9</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">minimum</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.minimum(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast minimum a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">minimum</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.minimum(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor. 
Must have the same type as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="mod" href="#mod"> tf.mod</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/mod.ts#L56-L64" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the mod of a and b element-wise. <code>floor(x / y) * y + mod(x, y) = x</code> Supports broadcasting.</p> <p>We also expose <code>tf.modStrict</code> which has the same signature as this op and asserts that <code>a</code> and <code>b</code> are the same shape (does not broadcast).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">9</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">mod</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mod(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast a mod b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">mod</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mod(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor. 
Must have the same type as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="pow" href="#pow"> tf.pow</a> <span class="signature">(base, exp)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/pow.ts#L55-L64" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the power of one <a href="#class:Tensor">tf.Tensor</a> to another. Supports broadcasting.</p> <p>Given a <a href="#class:Tensor">tf.Tensor</a> x and a <a href="#class:Tensor">tf.Tensor</a> y, this operation computes x^y for corresponding elements in x and y. The result's dtype will be the upcasted type of the <code>base</code> and <code>exp</code> dtypes.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">5</span>]]) <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">0</span>]]).<span class="hljs-title function_">toInt</span>(); a.<span class="hljs-title function_">pow</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.pow(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]) <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor</span>(<span class="hljs-number">2</span>).<span class="hljs-title function_">toInt</span>(); a.<span class="hljs-title function_">pow</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.pow(a, b)</span> </code></pre> <p>We also expose <code>powStrict</code> which has the same signature as this op and asserts that <code>base</code> and <code>exp</code> are the same shape (does not broadcast).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">base</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The base <a href="#class:Tensor">tf.Tensor</a> to pow element-wise.</span> </li> <li class="parameter"> <span class="param-name">exp</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The exponent <a href="#class:Tensor">tf.Tensor</a> to pow element-wise.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a 
href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="squaredDifference" href="#squaredDifference"> tf.squaredDifference</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/squared_difference.ts#L53-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns (a - b) * (a - b) element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">16</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">9</span>, <span class="hljs-number">4</span>]); a.<span class="hljs-title function_">squaredDifference</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.squaredDifference(a, b)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Broadcast squared difference a with b.</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">5</span>); a.<span class="hljs-title function_">squaredDifference</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.squaredDifference(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor. 
Must have the same type as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Basic math" href="#Operations-Basic math" class="symbol-link"> Operations / Basic math </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="abs" href="#abs"> tf.abs</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/abs.ts#L39-L49" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes absolute value element-wise: <code>abs(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">abs</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.abs(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input <a href="#class:Tensor">tf.Tensor</a>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="acos" href="#acos"> tf.acos</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/acos.ts#L37-L42" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes acos of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>acos(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">acos</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.acos(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a 
class="symbol-link" name="acosh" href="#acosh"> tf.acosh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/acosh.ts#L40-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the inverse hyperbolic cos of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>acosh(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">1</span>, <span class="hljs-number">3</span>, <span class="hljs-number">5.7</span>]); x.<span class="hljs-title function_">acosh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.acosh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="asin" href="#asin"> tf.asin</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/asin.ts#L38-L43" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes asin of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>asin(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">asin</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.asin(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="asinh" href="#asinh"> tf.asinh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/asinh.ts#L40-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes inverse hyperbolic sin of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>asinh(x)</code></p> 
<pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">asinh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.asinh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="atan" href="#atan"> tf.atan</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/atan.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes atan of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>atan(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">atan</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.atan(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="atan2" href="#atan2"> tf.atan2</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/atan2.ts#L44-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes arctangent of <a href="#class:Tensor">tf.Tensor</a>s a / b element-wise: <code>atan2(a, b)</code>. 
Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1.0</span>, <span class="hljs-number">1.0</span>, -<span class="hljs-number">1.0</span>, <span class="hljs-number">.7</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2.0</span>, <span class="hljs-number">13.0</span>, <span class="hljs-number">3.5</span>, <span class="hljs-number">.21</span>]); tf.<span class="hljs-title function_">atan2</span>(a, b).<span class="hljs-title function_">print</span>() </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor. Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="atanh" href="#atanh"> tf.atanh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/atanh.ts#L40-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes inverse hyperbolic tan of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>atanh(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">.1</span>, -<span class="hljs-number">.1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">atanh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.atanh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="ceil" href="#ceil"> tf.ceil</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/ceil.ts#L39-L44" 
target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes ceiling of input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>ceil(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">.6</span>, <span class="hljs-number">1.1</span>, -<span class="hljs-number">3.3</span>]); x.<span class="hljs-title function_">ceil</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.ceil(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="clipByValue" href="#clipByValue"> tf.clipByValue</a> <span class="signature">(x, clipValueMin, clipValueMax)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/clip_by_value.ts#L43-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Clips values element-wise. <code>max(min(x, clipValueMax), clipValueMin)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">clipByValue</span>(-<span class="hljs-number">2</span>, <span class="hljs-number">3</span>).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.clipByValue(x, -2, 3)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">clipValueMin</span> <span class="param-type">(number)</span> <span class="param-docs">Lower bound of range to be clipped to.</span> </li> <li class="parameter"> <span class="param-name">clipValueMax</span> <span class="param-type">(number)</span> <span class="param-docs">Upper bound of range to be clipped to.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="cos" href="#cos"> tf.cos</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/cos.ts#L39-L45" 
target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes cos of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>cos(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> / <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> * <span class="hljs-number">3</span> / <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">cos</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.cos(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. Must be float32 type.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="cosh" href="#cosh"> tf.cosh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/cosh.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes hyperbolic cos of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>cosh(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">cosh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.cosh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. Must be float32 type.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="elu" href="#elu"> tf.elu</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/elu.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes exponential linear element-wise: <code>x &gt; 0 ? 
x : (e ^ x) - 1</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">elu</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.elu(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="erf" href="#erf"> tf.erf</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/erf.ts#L42-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes Gauss error function of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>erf(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">.1</span>, -<span class="hljs-number">.1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">erf</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.erf(x);</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="exp" href="#exp"> tf.exp</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/exp.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes exponential of the input <a href="#class:Tensor">tf.Tensor</a> element-wise. 
<code>e ^ x</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); x.<span class="hljs-title function_">exp</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.exp(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="expm1" href="#expm1"> tf.expm1</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/expm1.ts#L40-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes exponential of the input <a href="#class:Tensor">tf.Tensor</a> minus one element-wise. <code>e ^ x - 1</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); x.<span class="hljs-title function_">expm1</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.expm1(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="floor" href="#floor"> tf.floor</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/floor.ts#L38-L43" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes floor of input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>floor(x)</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">.6</span>, <span class="hljs-number">1.1</span>, -<span class="hljs-number">3.3</span>]); x.<span class="hljs-title function_">floor</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.floor(x)</span> </code></pre> </div> <div class="parameter-list "> <div 
class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="isFinite" href="#isFinite"> tf.isFinite</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/is_finite.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns which elements of x are finite.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-title class_">NaN</span>, <span class="hljs-title class_">Infinity</span>, -<span class="hljs-title class_">Infinity</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>]); x.<span class="hljs-built_in">isFinite</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.isFinite(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="isInf" href="#isInf"> tf.isInf</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/is_inf.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns which elements of x are Infinity or -Infinity.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-title class_">NaN</span>, <span class="hljs-title class_">Infinity</span>, -<span class="hljs-title class_">Infinity</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>]); x.<span class="hljs-title function_">isInf</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.isInf(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span 
class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="isNaN" href="#isNaN"> tf.isNaN</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/is_nan.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns which elements of x are NaN.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-title class_">NaN</span>, <span class="hljs-title class_">Infinity</span>, -<span class="hljs-title class_">Infinity</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>]); x.<span class="hljs-built_in">isNaN</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.isNaN(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="leakyRelu" href="#leakyRelu"> tf.leakyRelu</a> <span class="signature">(x, alpha?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/leaky_relu.ts#L45-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes leaky rectified linear element-wise.</p> <p>See <a target="_blank" rel="noopener" href="http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf">http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf</a></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">leakyRelu</span>(<span class="hljs-number">0.1</span>).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.leakyRelu(x, 0.1)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">The scaling factor for negative values, defaults to 0.2.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span 
class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="log" href="#log"> tf.log</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/log.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes natural logarithm of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>ln(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">E</span>]); x.<span class="hljs-title function_">log</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.log(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="log1p" href="#log1p"> tf.log1p</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/log1p.ts#L40-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes natural logarithm of the input <a href="#class:Tensor">tf.Tensor</a> plus one element-wise: <code>ln(1 + x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">E</span> - <span class="hljs-number">1</span>]); x.<span class="hljs-title function_">log1p</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.log1p(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logSigmoid" href="#logSigmoid"> tf.logSigmoid</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a 
href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/log_sigmoid.ts#L42-L62" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes log sigmoid of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>logSigmoid(x)</code>. For numerical stability, we use <code>-tf.softplus(-x)</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">logSigmoid</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.logSigmoid(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="neg" href="#neg"> tf.neg</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/neg.ts#L40-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes <code>-1 * x</code> element-wise.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">2</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">neg</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.neg(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="prelu" href="#prelu"> tf.prelu</a> <span class="signature">(x, alpha)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/prelu.ts#L43-L49" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes leaky rectified linear element-wise with parametric alphas.</p> <p><code>x &lt; 0 ? 
alpha * x : x</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> alpha = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">0.1</span>); x.<span class="hljs-title function_">prelu</span>(alpha).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.prelu(x, alpha)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">alpha</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Scaling factor for negative values.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="reciprocal" href="#reciprocal"> tf.reciprocal</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/reciprocal.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes reciprocal of x element-wise: <code>1 / x</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); x.<span class="hljs-title function_">reciprocal</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.reciprocal(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="relu" href="#relu"> tf.relu</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/relu.ts#L40-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes rectified linear element-wise: <code>max(x, 0)</code>.</p> <pre 
class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">relu</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.relu(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. If the dtype is <code>bool</code>, the output dtype will be <code>int32</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="relu6" href="#relu6"> tf.relu6</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/relu6.ts#L40-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes rectified linear 6 element-wise: <code>min(max(x, 0), 6)</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">8</span>]); x.<span class="hljs-title function_">relu6</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.relu6(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. If the dtype is <code>bool</code>, the output dtype will be <code>int32</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="round" href="#round"> tf.round</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/round.ts#L40-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes round of input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>round(x)</code>. 
It implements banker's rounding.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">.6</span>, <span class="hljs-number">1.1</span>, -<span class="hljs-number">3.3</span>]); x.<span class="hljs-title function_">round</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.round(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="rsqrt" href="#rsqrt"> tf.rsqrt</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/rsqrt.ts#L40-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes reciprocal of square root of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>y = 1 / sqrt(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, -<span class="hljs-number">1</span>]); x.<span class="hljs-title function_">rsqrt</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.rsqrt(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="selu" href="#selu"> tf.selu</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/selu.ts#L41-L47" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes scaled exponential linear element-wise.</p> <p><code>x &lt; 0 ? 
scale * alpha * (exp(x) - 1) : scale * x</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">selu</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.selu(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sigmoid" href="#sigmoid"> tf.sigmoid</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sigmoid.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes sigmoid element-wise, <code>1 / (1 + exp(-x))</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); x.<span class="hljs-title function_">sigmoid</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sigmoid(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sign" href="#sign"> tf.sign</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sign.ts#L39-L43" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns an element-wise indication of the sign of a number.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">.6</span>, <span class="hljs-number">1.1</span>, -<span class="hljs-number">3.3</span>, <span class="hljs-title class_">NaN</span>, <span class="hljs-number">0</span>]); x.<span class="hljs-title function_">sign</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or 
tf.sign(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sin" href="#sin"> tf.sin</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sin.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes sin of the input Tensor element-wise: <code>sin(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> / <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> * <span class="hljs-number">3</span> / <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">sin</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sin(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sinh" href="#sinh"> tf.sinh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sinh.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes hyperbolic sin of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>sinh(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">sinh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sinh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" 
href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="softplus" href="#softplus"> tf.softplus</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/softplus.ts#L39-L44" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes softplus of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>log(exp(x) + 1)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">.7</span>]); x.<span class="hljs-title function_">softplus</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.softplus(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sqrt" href="#sqrt"> tf.sqrt</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sqrt.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes square root of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>y = sqrt(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, -<span class="hljs-number">1</span>]); x.<span class="hljs-title function_">sqrt</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sqrt(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" 
name="square" href="#square"> tf.square</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/square.ts#L36-L40" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes square of <code>x</code> element-wise: <code>x ^ 2</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-title function_">sqrt</span>(<span class="hljs-number">2</span>), -<span class="hljs-number">1</span>]); x.<span class="hljs-title function_">square</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.square(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="step" href="#step"> tf.step</a> <span class="signature">(x, alpha?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/step.ts#L41-L50" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes step of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>x &gt; 0 ? 1 : alpha</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">2</span>, -<span class="hljs-number">1</span>, -<span class="hljs-number">3</span>]); x.<span class="hljs-title function_">step</span>(<span class="hljs-number">.5</span>).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.step(x, .5)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">The value returned for elements that are zero or negative. 
Defaults to 0.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tan" href="#tan"> tf.tan</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tan.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes tan of the input <a href="#class:Tensor">tf.Tensor</a> element-wise, <code>tan(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> / <span class="hljs-number">2</span>, <span class="hljs-title class_">Math</span>.<span class="hljs-property">PI</span> * <span class="hljs-number">3</span> / <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">tan</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.tan(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tanh" href="#tanh"> tf.tanh</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tanh.ts#L39-L45" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes hyperbolic tangent of the input <a href="#class:Tensor">tf.Tensor</a> element-wise: <code>tanh(x)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">70</span>]); x.<span class="hljs-title function_">tanh</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.tanh(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Matrices" 
href="#Operations-Matrices" class="symbol-link"> Operations / Matrices </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="dot" href="#dot"> tf.dot</a> <span class="signature">(t1, t2)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/dot.ts#L44-L80" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the dot product of two matrices and/or vectors, <code>t1</code> and <code>t2</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]]); a.<span class="hljs-title function_">dot</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.dot(a, b)</span> b.<span class="hljs-title function_">dot</span>(a).<span class="hljs-title function_">print</span>(); b.<span class="hljs-title function_">dot</span>(c).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">t1</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first tensor in the dot operation.</span> </li> <li class="parameter"> <span class="param-name">t2</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second tensor in the dot operation.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="euclideanNorm" href="#euclideanNorm"> tf.euclideanNorm</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/euclidean_norm.ts#L47-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the Euclidean norm of scalar, vectors, and matrices.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span 
class="hljs-title function_">euclideanNorm</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.euclideanNorm(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input array.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">Optional. If axis is null (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the Tensor, i.e. euclideanNorm(x) is equivalent to euclideanNorm(x.reshape([-1])). If axis is an integer, the input is considered a batch of vectors, and axis determines the axis in x over which to compute vector norms. If axis is a 2-tuple of integer it is considered a batch of matrices and axis determines the axes in NDArray over which to compute a matrix norm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. If true, the norm has the same dimensionality as the input.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="matMul" href="#matMul"> tf.matMul</a> <span class="signature">(a, b, transposeA?, transposeB?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/mat_mul.ts#L44-L57" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the dot product of two matrices, A * B. 
These must be matrices.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">matMul</span>(b).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.matMul(a, b)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">First matrix in dot product operation.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Second matrix in dot product operation.</span> </li> <li class="parameter"> <span class="param-name">transposeA</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, <code>a</code> is transposed before multiplication.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">transposeB</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, <code>b</code> is transposed before multiplication.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="norm" href="#norm"> tf.norm</a> <span class="signature">(x, ord?, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/norm.ts#L73-L85" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the norm of scalar, vectors, and matrices. 
This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p &gt; 0) and matrix norms (Frobenius, 1-norm, and inf-norm).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">norm</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.norm(x)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input array.</span> </li> <li class="parameter"> <span class="param-name">ord</span> <span class="param-type">(number|'euclidean'|'fro')</span> <span class="param-docs">Optional. Order of the norm. Supported norm types are following:</p> <table> <thead> <tr> <th>ord</th> <th>norm for matrices</th> <th>norm for vectors</th> </tr> </thead> <tbody> <tr> <td>'euclidean'</td> <td>Frobenius norm</td> <td>2-norm</td> </tr> <tr> <td>'fro'</td> <td>Frobenius norm</td> <td></td> </tr> <tr> <td>Infinity</td> <td>max(sum(abs(x), axis=1))</td> <td>max(abs(x))</td> </tr> <tr> <td>-Infinity</td> <td>min(sum(abs(x), axis=1))</td> <td>min(abs(x))</td> </tr> <tr> <td>1</td> <td>max(sum(abs(x), axis=0))</td> <td>sum(abs(x))</td> </tr> <tr> <td>2</td> <td></td> <td>sum(abs(x)^2)^(1/2)</td> </tr> </tbody> </table> </span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">Optional. If axis is null (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the Tensor, i.e. norm(x, ord) is equivalent to norm(x.reshape([-1]), ord). If axis is an integer, the input is considered a batch of vectors, and axis determines the axis in x over which to compute vector norms. If axis is a 2-tuple of integer it is considered a batch of matrices and axis determines the axes in NDArray over which to compute a matrix norm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. 
If true, the norm has the same dimensionality as the input.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="outerProduct" href="#outerProduct"> tf.outerProduct</a> <span class="signature">(v1, v2)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/outer_product.ts#L40-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the outer product of two vectors, <code>v1</code> and <code>v2</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>]); tf.<span class="hljs-title function_">outerProduct</span>(a, b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">v1</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first vector in the outer product operation.</span> </li> <li class="parameter"> <span class="param-name">v2</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second vector in the outer product operation.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="transpose" href="#transpose"> tf.transpose</a> <span class="signature">(x, perm?, conjugate?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/transpose.ts#L53-L98" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Transposes the <a href="#class:Tensor">tf.Tensor</a>. Permutes the dimensions according to <code>perm</code>.</p> <p>The returned <a href="#class:Tensor">tf.Tensor</a>'s dimension <code>i</code> will correspond to the input dimension <code>perm[i]</code>. If <code>perm</code> is not given, it is set to <code>[n-1...0]</code>, where <code>n</code> is the rank of the input <a href="#class:Tensor">tf.Tensor</a>. 
Hence by default, this operation performs a regular matrix transpose on 2-D input <a href="#class:Tensor">tf.Tensor</a>s.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">transpose</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.transpose(a)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to transpose.</span> </li> <li class="parameter"> <span class="param-name">perm</span> <span class="param-type">(number[])</span> <span class="param-docs">The permutation of the dimensions of a.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">conjugate</span> <span class="param-type">(boolean)</span> <span class="param-docs">Will conjugate complex input if true.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Convolution" href="#Operations-Convolution" class="symbol-link"> Operations / Convolution </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="avgPool" href="#avgPool"> tf.avgPool</a> <span class="signature">(x, filterSize, strides, pad, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/avg_pool.ts#L54-L93" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the 2D average pooling of an image.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3 of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filterSize</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The filter size: <code>[filterHeight, filterWidth]</code>. 
If <code>filterSize</code> is a single number, then <code>filterHeight == filterWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the pooling: <code>[strideHeight, strideWidth]</code>. If <code>strides</code> is a single number, then <code>strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|conv_util.ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm:</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="avgPool3d" href="#avgPool3d"> tf.avgPool3d</a> <span class="signature">(x, filterSize, strides, pad, dimRoundingMode?, dataFormat?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/avg_pool_3d.ts#L68-L113" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the 3D average pooling.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor5d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> result = tf.<span class="hljs-title function_">avgPool3d</span>(x, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;valid&#x27;</span>); result.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 5 or rank 4 of shape <code>[batch, depth, height, width, inChannels]</code>.</span> </li> <li class="parameter"> 
<span class="param-name">filterSize</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The filter size: <code>[filterDepth, filterHeight, filterWidth]</code>. If <code>filterSize</code> is a single number, then <code>filterDepth == filterHeight == filterWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The strides of the pooling: <code>[strideDepth, strideHeight, strideWidth]</code>. If <code>strides</code> is a single number, then <code>strideDepth == strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1*1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NDHWC'|'NCDHW')</span> <span class="param-docs">An optional string from: &quot;NDHWC&quot;, &quot;NCDHW&quot;. Defaults to &quot;NDHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NDHWC&quot;, the data is stored in the order of: [batch, depth, height, width, channels]. Only &quot;NDHWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="conv1d" href="#conv1d"> tf.conv1d</a> <span class="signature">(x, filter, stride, pad, dataFormat?, dilation?, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/conv1d.ts#L55-L114" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes a 1D convolution over the input x.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 3 or rank 2, of shape <code>[batch, width, inChannels]</code>. 
If rank 2, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter, rank 3, of shape <code>[filterWidth, inDepth, outDepth]</code>.</span> </li> <li class="parameter"> <span class="param-name">stride</span> <span class="param-type">(number)</span> <span class="param-docs">The number of entries by which the filter is moved right at each step.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|conv_util.ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NWC'|'NCW')</span> <span class="param-docs">An optional string from &quot;NWC&quot;, &quot;NCW&quot;. Defaults to &quot;NWC&quot;, the data is stored in the order of [batch, in_width, in_channels]. Only &quot;NWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dilation</span> <span class="param-type">(number)</span> <span class="param-docs">The dilation rate in which we sample input values in atrous convolution. Defaults to <code>1</code>. If it is greater than 1, then stride must be <code>1</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. 
If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="conv2d" href="#conv2d"> tf.conv2d</a> <span class="signature">(x, filter, strides, pad, dataFormat?, dilations?, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/conv2d.ts#L62-L117" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes a 2D convolution over the input <code>x</code>.</p>
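 <p>A minimal usage sketch showing the expected input and filter shapes; the shapes and values below are arbitrary, for illustration only.</p> <pre class="hljs"><code class="hljs language-js">// x: [batch, height, width, inChannels]; filter: [filterHeight, filterWidth, inDepth, outDepth].
const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
const filter = tf.tensor4d([1, 0, 0, 1], [2, 2, 1, 1]);
// Stride 1 with 'same' padding keeps the spatial size.
tf.conv2d(x, filter, 1, 'same').print();
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3, of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter, rank 4, of shape <code>[filterHeight, filterWidth, inDepth, outDepth]</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the convolution: <code>[strideHeight, strideWidth]</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|conv_util.ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NHWC'|'NCHW')</span> <span class="param-docs">: An optional string from: &quot;NHWC&quot;, &quot;NCHW&quot;. Defaults to &quot;NHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NHWC&quot;, the data is stored in the order of: [batch, height, width, channels].</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dilations</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The dilation rates: <code>[dilationHeight, dilationWidth]</code> in which we sample input values across the height and width dimensions in atrous convolution. Defaults to <code>[1, 1]</code>. 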
If <code>dilations</code> is a single number, then <code>dilationHeight == dilationWidth</code>. If it is greater than 1, then all values of <code>strides</code> must be 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="conv2dTranspose" href="#conv2dTranspose"> tf.conv2dTranspose</a> <span class="signature">(x, filter, outputShape, strides, pad, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/conv2d_transpose.ts#L45-L56" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the transposed 2D convolution of an image, also known as a deconvolution.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input image, of rank 4 or rank 3, of shape <code>[batch, height, width, inDepth]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter, rank 4, of shape <code>[filterHeight, filterWidth, outDepth, inDepth]</code>. <code>inDepth</code> must match <code>inDepth</code> in <code>x</code>.</span> </li> <li class="parameter"> <span class="param-name">outputShape</span> <span class="param-type">([number, number, number, number]|[number, number, number])</span> <span class="param-docs">Output shape, of rank 4 or rank 3: <code>[batch, height, width, outDepth]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the original convolution: <code>[strideHeight, strideWidth]</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm used in the non-transpose version of the op.</span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. 
If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="conv3d" href="#conv3d"> tf.conv3d</a> <span class="signature">(x, filter, strides, pad, dataFormat?, dilations?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/conv3d.ts#L62-L118" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes a 3D convolution over the input x.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 5 or rank 4, of shape <code>[batch, depth, height, width, channels]</code>. If rank 4, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter, rank 5, of shape <code>[filterDepth, filterHeight, filterWidth, inChannels, outChannels]</code>. inChannels must match between input and filter.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The strides of the convolution: <code>[strideDepth, strideHeight, strideWidth]</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same')</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NDHWC'|'NCDHW')</span> <span class="param-docs">: An optional string from: &quot;NDHWC&quot;, &quot;NCDHW&quot;. Defaults to &quot;NDHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NDHWC&quot;, the data is stored in the order of: [batch, depth, height, width, channels]. Only &quot;NDHWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dilations</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The dilation rates: <code>[dilationDepth, dilationHeight, dilationWidth]</code> in which we sample input values across the height and width dimensions in atrous convolution. 
Defaults to <code>[1, 1, 1]</code>. If <code>dilations</code> is a single number, then <code>dilationDepth == dilationHeight == dilationWidth</code>. If it is greater than 1, then all values of <code>strides</code> must be 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="conv3dTranspose" href="#conv3dTranspose"> tf.conv3dTranspose</a> <span class="signature">(x, filter, outputShape, strides, pad)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/conv3d_transpose.ts#L43-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the transposed 3D convolution of a volume, also known as a deconvolution.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input image, of rank 5 or rank 4, of shape <code>[batch, depth, height, width, inDepth]</code>. If rank 4, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter, rank 5, of shape <code>[depth, filterHeight, filterWidth, outDepth, inDepth]</code>. <code>inDepth</code> must match <code>inDepth</code> in <code>x</code>.</span> </li> <li class="parameter"> <span class="param-name">outputShape</span> <span class="param-type">([number, number, number, number, number]|[number, number, number, number])</span> <span class="param-docs">Output shape, of rank 5 or rank 4: <code>[batch, depth, height, width, outDepth]</code>. 
If rank 4, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The strides of the original convolution: <code>[strideDepth, strideHeight, strideWidth]</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same')</span> <span class="param-docs">The type of padding algorithm used in the non-transpose version of the op.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="depthwiseConv2d" href="#depthwiseConv2d"> tf.depthwiseConv2d</a> <span class="signature">(x, filter, strides, pad, dataFormat?, dilations?, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/depthwise_conv2d.ts#L75-L120" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Depthwise 2D convolution.</p> <p>Given a 4D <code>input</code> array and a <code>filter</code> array of shape <code>[filterHeight, filterWidth, inChannels, channelMultiplier]</code> containing <code>inChannels</code> convolutional filters of depth 1, this op applies a different filter to each input channel (expanding from 1 channel to <code>channelMultiplier</code> channels for each), then concatenates the results together. The output has <code>inChannels * channelMultiplier</code> channels.</p> <p>See <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d">https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d</a> for more details.</p>
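 <p>A brief usage sketch; the shapes and values below are arbitrary and chosen only to illustrate the expected layouts.</p> <pre class="hljs"><code class="hljs language-js">// x: [batch, height, width, inChannels]; filter: [filterHeight, filterWidth, inChannels, channelMultiplier].
const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
const filter = tf.tensor4d([1, 1, 1, 1], [2, 2, 1, 1]);
// Each input channel is convolved with its own filter.
tf.depthwiseConv2d(x, filter, 1, 'same').print();
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3, of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter tensor, rank 4, of shape <code>[filterHeight, filterWidth, inChannels, channelMultiplier]</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the convolution: <code>[strideHeight, strideWidth]</code>. 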
If strides is a single number, then <code>strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|conv_util.ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NHWC'|'NCHW')</span> <span class="param-docs">: An optional string from: &quot;NHWC&quot;, &quot;NCHW&quot;. Defaults to &quot;NHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NHWC&quot;, the data is stored in the order of: [batch, height, width, channels]. Only &quot;NHWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dilations</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The dilation rates: <code>[dilationHeight, dilationWidth]</code> in which we sample input values across the height and width dimensions in atrous convolution. Defaults to <code>[1, 1]</code>. If <code>rate</code> is a single number, then <code>dilationHeight == dilationWidth</code>. If it is greater than 1, then all values of <code>strides</code> must be 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="dilation2d" href="#dilation2d"> tf.dilation2d</a> <span class="signature">(x, filter, strides, pad, dilations?, dataFormat?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/dilation2d.ts#L61-L108" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the grayscale dilation over the input <code>x</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, rank 3 or rank 4 of shape <code>[batch, height, width, depth]</code>. 
If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The filter tensor, rank 3, of shape <code>[filterHeight, filterWidth, depth]</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the sliding window for each dimension of the input tensor: <code>[strideHeight, strideWidth]</code>. If <code>strides</code> is a single number, then <code>strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same')</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dilations</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The dilation rates: <code>[dilationHeight, dilationWidth]</code> in which we sample input values across the height and width dimensions for atrous morphological dilation. Defaults to <code>[1, 1]</code>. If <code>dilations</code> is a single number, then <code>dilationHeight == dilationWidth</code>. If it is greater than 1, then all values of <code>strides</code> must be 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NHWC')</span> <span class="param-docs">Specify the data format of the input and output data. Defaults to 'NHWC'. Only 'NHWC' is currently supported. 
With the default format &quot;NHWC&quot;, the data is stored in the order of: [batch, height, width, channels].</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="maxPool3d" href="#maxPool3d"> tf.maxPool3d</a> <span class="signature">(x, filterSize, strides, pad, dimRoundingMode?, dataFormat?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/max_pool_3d.ts#L66-L104" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the 3D max pooling.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor5d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> result = tf.<span class="hljs-title function_">maxPool3d</span>(x, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>, <span class="hljs-string">&#x27;valid&#x27;</span>); result.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 5 or rank 4 of shape <code>[batch, depth, height, width, inChannels]</code>.</span> </li> <li class="parameter"> <span class="param-name">filterSize</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The filter size: <code>[filterDepth, filterHeight, filterWidth]</code>. If <code>filterSize</code> is a single number, then <code>filterDepth == filterHeight == filterWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number, number]|number)</span> <span class="param-docs">The strides of the pooling: <code>[strideDepth, strideHeight, strideWidth]</code>. 
If <code>strides</code> is a single number, then <code>strideDepth == strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NDHWC'|'NCDHW')</span> <span class="param-docs">An optional string from: &quot;NDHWC&quot;, &quot;NCDHW&quot;. Defaults to &quot;NDHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NDHWC&quot;, the data is stored in the order of: [batch, depth, height, width, channels]. Only &quot;NDHWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="maxPoolWithArgmax" href="#maxPoolWithArgmax"> tf.maxPoolWithArgmax</a> <span class="signature">(x, filterSize, strides, pad, includeBatchInIndex?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/max_pool_with_argmax.ts#L61-L77" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the 2D max pooling of an image with Argmax index. The indices in argmax are flattened, so that a maximum value at position <code>[b, y, x, c]</code> becomes flattened index: <code>(y * width + x) * channels + c</code> if include_batch_in_index is False; <code>((b * height + y) * width + x) * channels + c</code> if include_batch_in_index is True.</p> <p>The indices returned are always in <code>[0, height) x [0, width)</code> before flattening.</p>
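 <p>A short usage sketch; the shapes and values below are arbitrary, and the returned map is assumed to expose <code>result</code> and <code>indexes</code> tensors.</p> <pre class="hljs"><code class="hljs language-js">// x: [batch, height, width, inChannels].
const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
const out = tf.maxPoolWithArgmax(x, [2, 2], [2, 2], 'same');
out.result.print();   // pooled values
out.indexes.print();  // flattened argmax indices
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3 of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">filterSize</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The filter size: <code>[filterHeight, filterWidth]</code>. 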
If <code>filterSize</code> is a single number, then <code>filterHeight == filterWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the pooling: <code>[strideHeight, strideWidth]</code>. If <code>strides</code> is a single number, then <code>strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number)</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">includeBatchInIndex</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="pool" href="#pool"> tf.pool</a> <span class="signature">(input, windowShape, poolingType, pad, dilations?, strides?, dimRoundingMode?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/pool.ts#L59-L128" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Performs an N-D pooling operation</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3 of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">windowShape</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The filter size: <code>[filterHeight, filterWidth]</code>. 
If <code>filterSize</code> is a single number, then <code>filterHeight == filterWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">poolingType</span> <span class="param-type">('avg'|'max')</span> <span class="param-docs">The type of pooling, either 'max' or 'avg'.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same'|number|conv_util.ExplicitPadding)</span> <span class="param-docs">The type of padding algorithm:</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_guides/python/nn#Convolution">https://www.tensorflow.org/api_guides/python/nn#Convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dilations</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The dilation rates: <code>[dilationHeight, dilationWidth]</code> in which we sample input values across the height and width dimensions in dilated pooling. Defaults to <code>[1, 1]</code>. If <code>dilationRate</code> is a single number, then <code>dilationHeight == dilationWidth</code>. If it is greater than 1, then all values of <code>strides</code> must be 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the pooling: <code>[strideHeight, strideWidth]</code>. If <code>strides</code> is a single number, then <code>strideHeight == strideWidth</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dimRoundingMode</span> <span class="param-type">('floor'|'round'|'ceil')</span> <span class="param-docs">A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="separableConv2d" href="#separableConv2d"> tf.separableConv2d</a> <span class="signature">(x, depthwiseFilter, pointwiseFilter, strides, pad, dilation?, dataFormat?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/separable_conv2d.ts#L72-L137" target=_blank>Source</a> </span> </div> <div class="documentation"><p>2-D convolution with separable filters.</p> <p>Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. 
Note that this is separability between dimensions [1, 2] and 3, not spatial separability between dimensions 1 and 2.</p> <p>See <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d">https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d</a> for more details.</p>
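<p>A minimal sketch (shapes chosen arbitrarily; random filters stand in for trained weights):</p> <pre class="hljs"><code class="hljs language-js">const x = tf.randomNormal([1, 8, 8, 3]);
// Depthwise filter: [filterHeight, filterWidth, inChannels, channelMultiplier].
const depthwiseFilter = tf.randomNormal([3, 3, 3, 1]);
// Pointwise filter: [1, 1, inChannels * channelMultiplier, outChannels].
const pointwiseFilter = tf.randomNormal([1, 1, 3, 4]);
tf.separableConv2d(x, depthwiseFilter, pointwiseFilter, 1, 'same').print();
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor, of rank 4 or rank 3, of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">depthwiseFilter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The depthwise filter tensor, rank 4, of shape <code>[filterHeight, filterWidth, inChannels, channelMultiplier]</code>. This is the filter used in the first step.</span> </li> <li class="parameter"> <span class="param-name">pointwiseFilter</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The pointwise filter tensor, rank 4, of shape <code>[1, 1, inChannels * channelMultiplier, outChannels]</code>. This is the filter used in the second step.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs">The strides of the convolution: <code>[strideHeight, strideWidth]</code>. If strides is a single number, then <code>strideHeight == strideWidth</code>.</span> </li> <li class="parameter"> <span class="param-name">pad</span> <span class="param-type">('valid'|'same')</span> <span class="param-docs">The type of padding algorithm.</p> <ul> <li><code>same</code> and stride 1: output will be of same size as input, regardless of filter size.</li> <li><code>valid</code>: output will be smaller than input if filter is larger than 1x1.</li> <li>For more info, see this guide: <a target="_blank" rel="noopener" href="https://www.tensorflow.org/api_docs/python/tf/nn/convolution">https://www.tensorflow.org/api_docs/python/tf/nn/convolution</a></li> </ul> </span> </li> <li class="parameter"> <span class="param-name">dilation</span> <span class="param-type">([number, number]|number)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">dataFormat</span> <span class="param-type">('NHWC'|'NCHW')</span> <span class="param-docs">An optional string from: &quot;NHWC&quot;, &quot;NCHW&quot;. Defaults to &quot;NHWC&quot;. Specify the data format of the input and output data. With the default format &quot;NHWC&quot;, the data is stored in the order of: [batch, height, width, channels].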
Only &quot;NHWC&quot; is currently supported.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Reduction" href="#Operations-Reduction" class="symbol-link"> Operations / Reduction </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="all" href="#all"> tf.all</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/all.ts#L57-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the logical and of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. If <code>axes</code> has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); x.<span class="hljs-title function_">all</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.all(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">all</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.all(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. 
By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="any" href="#any"> tf.any</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/any.ts#L57-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the logical or of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. If <code>axes</code> has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); x.<span class="hljs-title function_">any</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.any(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">any</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.any(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. 
By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="argMax" href="#argMax"> tf.argMax</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/arg_max.ts#L52-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the indices of the maximum values along an <code>axis</code>.</p> <p>The result has the same shape as <code>input</code> with the dimension along <code>axis</code> removed.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">argMax</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.argMax(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">argMax</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.argMax(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension to reduce. 
Defaults to 0 (outer-most dimension).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="argMin" href="#argMin"> tf.argMin</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/arg_min.ts#L52-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the indices of the minimum values along an <code>axis</code>.</p> <p>The result has the same shape as <code>input</code> with the dimension along <code>axis</code> removed.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">argMin</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.argMin(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">argMin</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.argMin(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension to reduce. Defaults to 0 (outer-most dimension).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="bincount" href="#bincount"> tf.bincount</a> <span class="signature">(x, weights, size)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/bincount.ts#L46-L68" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Outputs a vector with length <code>size</code> and the same dtype as <code>weights</code>.</p> <p>If <code>weights</code> are empty, then index <code>i</code> stores the number of times the value <code>i</code> is counted in <code>x</code>. 
If <code>weights</code> are non-empty, then index <code>i</code> stores the sum of the value in <code>weights</code> at each index where the corresponding value in <code>x</code> is <code>i</code>.</p> <p>Values in <code>x</code> outside of the range [0, size) are ignored.</p>
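<p>For example, a minimal sketch (a length-0 <code>weights</code> tensor makes every occurrence count as 1):</p> <pre class="hljs"><code class="hljs language-js">const x = tf.tensor1d([1, 1, 2, 3], 'int32');
const weights = tf.tensor1d([]);  // no weights: each occurrence counts as 1
tf.bincount(x, weights, 5).print();  // [0, 2, 1, 1, 0]
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input int tensor, rank 1.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The weights tensor, must have the same shape as x, or a length-0 Tensor, in which case it acts as all weights equal to 1.</span> </li> <li class="parameter"> <span class="param-name">size</span> <span class="param-type">(number)</span> <span class="param-docs">Non-negative integer.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="denseBincount" href="#denseBincount"> tf.denseBincount</a> <span class="signature">(x, weights, size, binaryOutput?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/dense_bincount.ts#L48-L76" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Outputs a vector with length <code>size</code> and the same dtype as <code>weights</code>.</p> <p>If <code>weights</code> are empty, then index <code>i</code> stores the number of times the value <code>i</code> is counted in <code>x</code>.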
If <code>weights</code> are non-empty, then index <code>i</code> stores the sum of the value in <code>weights</code> at each index where the corresponding value in <code>x</code> is <code>i</code>.</p> <p>Values in <code>x</code> outside of the range [0, size) are ignored.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input int tensor, rank 1 or rank 2.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The weights tensor, must have the same shape as x, or a length-0 Tensor, in which case it acts as all weights equal to 1.</span> </li> <li class="parameter"> <span class="param-name">size</span> <span class="param-type">(number)</span> <span class="param-docs">Non-negative integer.</span> </li> <li class="parameter"> <span class="param-name">binaryOutput</span> <span class="param-type">(boolean)</span> <span class="param-docs">Optional. Whether the kernel should count the appearance or number of occurrences. Defaults to False.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a>|<a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logSumExp" href="#logSumExp"> tf.logSumExp</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/log_sum_exp.ts#L62-L79" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the log(sum(exp(elements across the reduction dimensions))).</p> <p>Reduces the input along the dimensions given in <code>axis</code>. Unless <code>keepDims</code> is true, the rank of the array is reduced by 1 for each entry in <code>axis</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. 
If <code>axis</code> has no entries, all dimensions are reduced, and an array with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">logSumExp</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.logSumExp(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">logSumExp</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.logSumExp(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. If null (the default), reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with length of 1. Defaults to false.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="max" href="#max"> tf.max</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/max.ts#L57-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the maximum of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1.
If <code>axes</code> has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">max</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.max(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">max</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.max(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="mean" href="#mean"> tf.mean</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/mean.ts#L57-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the mean of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces <code>x</code> along the dimensions given in <code>axis</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axis</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. 
If <code>axis</code> has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">mean</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mean(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">mean</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.mean(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="min" href="#min"> tf.min</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/min.ts#L56-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the minimum value from the input.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the array is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1.
If <code>axes</code> has no entries, all dimensions are reduced, and an array with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">min</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.min(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">min</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.min(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="prod" href="#prod"> tf.prod</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/prod.ts#L59-L74" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the product of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. 
If <code>axes</code> has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">prod</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.prod(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">prod</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.prod(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to compute the product over. If the dtype is <code>bool</code> it will be converted to <code>int32</code> and the output dtype will be <code>int32</code>.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sum" href="#sum"> tf.sum</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sum.ts#L58-L71" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the sum of elements across dimensions of a <a href="#class:Tensor">tf.Tensor</a>.</p> <p>Reduces the input along the dimensions given in <code>axes</code>. Unless <code>keepDims</code> is true, the rank of the <a href="#class:Tensor">tf.Tensor</a> is reduced by 1 for each entry in <code>axes</code>. If <code>keepDims</code> is true, the reduced dimensions are retained with length 1. 
If axes has no entries, all dimensions are reduced, and a <a href="#class:Tensor">tf.Tensor</a> with a single element is returned.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); x.<span class="hljs-title function_">sum</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sum(x)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> axis = <span class="hljs-number">1</span>; x.<span class="hljs-title function_">sum</span>(axis).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.sum(x, axis)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to compute the sum over. If the dtype is <code>bool</code> it will be converted to <code>int32</code> and the output dtype will be <code>int32</code>.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) to reduce. 
By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, retains reduced dimensions with size 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Normalization" href="#Operations-Normalization" class="symbol-link"> Operations / Normalization </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="batchNorm" href="#batchNorm"> tf.batchNorm</a> <span class="signature">(x, mean, variance, offset?, scale?, varianceEpsilon?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/batchnorm.ts#L57-L109" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Batch normalization.</p> <p>As described in <a target="_blank" rel="noopener" href="http://arxiv.org/abs/1502.03167">http://arxiv.org/abs/1502.03167</a>.</p> <p>Mean, variance, scale, and offset can be of two shapes:</p> <ul> <li>The same shape as the input.</li> <li>In the common case, the depth dimension is the last dimension of x, so the values would be a <a href="#class:Tensor">tf.Tensor1D</a> of shape [depth].</li> </ul> <p>Also available are stricter rank-specific methods with the same signature as this method that assert that parameters passed are of given rank</p> <ul> <li><code>tf.batchNorm2d</code></li> <li><code>tf.batchNorm3d</code></li> <li><code>tf.batchNorm4d</code></li> </ul> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input Tensor.</span> </li> <li class="parameter"> <span class="param-name">mean</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A mean Tensor.</span> </li> <li class="parameter"> <span class="param-name">variance</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A variance Tensor.</span> </li> <li class="parameter"> <span class="param-name">offset</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">An offset Tensor.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scale</span> <span class="param-type">(<a 
href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A scale Tensor.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">varianceEpsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A small float number to avoid dividing by 0.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="localResponseNormalization" href="#localResponseNormalization"> tf.localResponseNormalization</a> <span class="signature">(x, depthRadius?, bias?, alpha?, beta?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/local_response_normalization.ts#L45-L78" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Normalizes the activation of a local neighborhood across or within channels.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. The 4-D input tensor is treated as a 3-D array of 1D vectors (along the last dimension), and each vector is normalized independently.</span> </li> <li class="parameter"> <span class="param-name">depthRadius</span> <span class="param-type">(number)</span> <span class="param-docs">The number of adjacent channels in the 1D normalization window.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">bias</span> <span class="param-type">(number)</span> <span class="param-docs">A constant bias term for the basis.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">alpha</span> <span class="param-type">(number)</span> <span class="param-docs">A scale factor, usually positive.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">beta</span> <span class="param-type">(number)</span> <span class="param-docs">An exponent.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logSoftmax" href="#logSoftmax"> tf.logSoftmax</a> <span class="signature">(logits, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/log_softmax.ts#L55-L104" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the log softmax.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title 
function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">logSoftmax</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.logSoftmax(a)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">logSoftmax</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.logSoftmax(a)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">logits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The logits array.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension softmax would be performed on. Defaults to <code>-1</code> which indicates the last dimension.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="moments" href="#moments"> tf.moments</a> <span class="signature">(x, axis?, keepDims?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/moments.ts#L45-L59" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Calculates the mean and variance of <code>x</code>. The mean and variance are calculated by aggregating the contents of <code>x</code> across <code>axes</code>. If <code>x</code> is 1-D and <code>axes = [0]</code> this is just the mean and variance of a vector.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number|number[])</span> <span class="param-docs">The dimension(s) along with to compute mean and variance. 
By default it reduces all dimensions.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">keepDims</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, the moments have the same dimensionality as the input.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{mean: <a href="#class:Tensor">tf.Tensor</a>, variance: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="softmax" href="#softmax"> tf.softmax</a> <span class="signature">(logits, dim?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/softmax.ts#L49-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the softmax normalized vector given the logits.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">softmax</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.softmax(a)</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">6</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">softmax</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// or tf.softmax(a)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">logits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The logits array.</span> </li> <li class="parameter"> <span class="param-name">dim</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension softmax would be performed on. 
Defaults to <code>-1</code> which indicates the last dimension.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sparseToDense" href="#sparseToDense"> tf.sparseToDense</a> <span class="signature">(sparseIndices, sparseValues, outputShape, defaultValue?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sparse_to_dense.ts#L68-L94" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Converts a sparse representation into a dense tensor.</p> <p>Builds an array dense with shape outputShape such that:</p> <p>// If sparseIndices is scalar dense[i] = (i == sparseIndices ? sparseValues : defaultValue)</p> <p>// If sparseIndices is a vector, then for each i dense[sparseIndices[i]] = sparseValues[i]</p> <p>// If sparseIndices is an n by d matrix, then for each i in [0, n) dense[sparseIndices[i][0], ..., sparseIndices[i][d-1]] = sparseValues[i] All other values in dense are set to defaultValue. If sparseValues is a scalar, all sparse indices are set to this single value.</p> <p>If indices are repeated the final value is summed over all values for those indices.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">15</span>], <span class="hljs-string">&#x27;float32&#x27;</span>); <span class="hljs-keyword">const</span> shape = [<span class="hljs-number">8</span>]; tf.<span class="hljs-title function_">sparseToDense</span>(indices, values, shape).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sparseIndices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A 0-D, 1-D, or 2-D Tensor of type int32. sparseIndices[i] contains the complete index where sparseValues[i] will be placed.</span> </li> <li class="parameter"> <span class="param-name">sparseValues</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A 0-D or 1-D Tensor. 
Values corresponding to each row of sparseIndices, or a scalar value to be used for all sparse indices.</span> </li> <li class="parameter"> <span class="param-name">outputShape</span> <span class="param-type">(number[])</span> <span class="param-docs">Shape of the dense output tensor. The type is inferred.</span> </li> <li class="parameter"> <span class="param-name">defaultValue</span> <span class="param-type">(<a href="#class:Tensor">tf.Scalar</a>|ScalarLike)</span> <span class="param-docs">Scalar. Value to set for indices not specified in sparseIndices. Defaults to zero.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Images" href="#Operations-Images" class="symbol-link"> Operations / Images </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.cropAndResize" href="#image.cropAndResize"> tf.image.cropAndResize</a> <span class="signature">(image, boxes, boxInd, cropSize, method?, extrapolationValue?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/crop_and_resize.ts#L52-L96" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by cropSize.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">4d tensor of shape <code>[batch,imageHeight,imageWidth, depth]</code>, where imageHeight and imageWidth must be positive, specifying the batch of images from which to take crops</span> </li> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">2d float32 tensor of shape <code>[numBoxes, 4]</code>. 
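For illustration, here is a minimal sketch of a <code>tf.image.cropAndResize</code> call (the image contents, box coordinates, and crop size are made-up values):
<pre class="hljs"><code class="hljs language-js">// Crop the top-left quadrant of one 100x100 RGB image and resize it to 50x50.
const image = tf.zeros([1, 100, 100, 3]);
const boxes = tf.tensor2d([[0, 0, 0.5, 0.5]]);   // [y1, x1, y2, x2], normalized coordinates
const boxInd = tf.tensor1d([0], 'int32');        // crop from image 0 of the batch
const crops = tf.image.cropAndResize(image, boxes, boxInd, [50, 50], 'bilinear');
console.log(crops.shape);                        // [1, 50, 50, 3]
</code></pre>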
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the normalized coordinates of the box in the <code>boxInd[i]</code>th image in the batch.</span> </li> <li class="parameter"> <span class="param-name">boxInd</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1d int32 tensor of shape <code>[numBoxes]</code> with values in range <code>[0, batch)</code> that specifies the image that the <code>i</code>-th box refers to.</span> </li> <li class="parameter"> <span class="param-name">cropSize</span> <span class="param-type">([number, number])</span> <span class="param-docs">1d int32 tensor of 2 elements <code>[cropHeight, cropWidth]</code> specifying the size to which all crops are resized.</span> </li> <li class="parameter"> <span class="param-name">method</span> <span class="param-type">('bilinear'|'nearest')</span> <span class="param-docs">Optional string from <code>'bilinear' | 'nearest'</code>, defaults to <code>'bilinear'</code>, which specifies the sampling method for resizing.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">extrapolationValue</span> <span class="param-type">(number)</span> <span class="param-docs">Value used to fill crop regions that extend beyond the image boundaries. Defaults to <code>0</code>.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.flipLeftRight" href="#image.flipLeftRight"> tf.image.flipLeftRight</a> <span class="signature">(image)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/flip_left_right.ts#L34-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Flips the image left to right.
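For example, flipping a tiny 2x2 single-channel image swaps its two columns (a minimal sketch with made-up pixel values):
<pre class="hljs"><code class="hljs language-js">const image = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);   // one 2x2 single-channel image
tf.image.flipLeftRight(image).print();                   // columns appear in reversed order
</code></pre>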
Currently available in the CPU, WebGL, and WASM backends.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">4d tensor of shape <code>[batch, imageHeight, imageWidth, depth]</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.grayscaleToRGB" href="#image.grayscaleToRGB"> tf.image.grayscaleToRGB</a> <span class="signature">(image)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/grayscale_to_rgb.ts#L34-L57" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Converts images from grayscale to RGB format.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>| <a href="#class:Tensor">tf.Tensor6D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A grayscale tensor to convert. The <code>image</code>'s last dimension must be size 1 with at least a two-dimensional shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>| <a href="#class:Tensor">tf.Tensor6D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppression" href="#image.nonMaxSuppression"> tf.image.nonMaxSuppression</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression.ts#L44-L62" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Performs non maximum suppression of bounding boxes based on iou (intersection over union).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
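As an illustrative sketch of <code>tf.image.nonMaxSuppression</code> (box coordinates and scores are made-up values), the op returns the indices of the boxes that survive suppression:
<pre class="hljs"><code class="hljs language-js">const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, 0.9, 1, 2]]);
const scores = tf.tensor1d([0.9, 0.75, 0.6]);
// Keep at most 2 boxes, suppressing boxes whose IoU with a kept box exceeds 0.5.
const selected = tf.image.nonMaxSuppression(boxes, scores, 2, 0.5);
selected.print();   // indices of the kept boxes, e.g. [0, 2]
</code></pre>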
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppressionAsync" href="#image.nonMaxSuppressionAsync"> tf.image.nonMaxSuppressionAsync</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression_async.ts#L45-L75" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Performs non maximum suppression of bounding boxes based on iou (intersection over union).</p> <p>This is the async version of <code>nonMaxSuppression</code></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
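A minimal sketch of the async variant (same made-up boxes and scores as above); it resolves to the same kind of index tensor without blocking the main thread:
<pre class="hljs"><code class="hljs language-js">const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0.1, 1, 1.1]]);
const scores = tf.tensor1d([0.9, 0.75]);
tf.image.nonMaxSuppressionAsync(boxes, scores, 1)
    .then(selected =&gt; selected.print());   // e.g. [0]
</code></pre>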
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor1D</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppressionPadded" href="#image.nonMaxSuppressionPadded"> tf.image.nonMaxSuppressionPadded</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?, padToMaxOutputSize?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression_padded.ts#L52-L81" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Asynchronously performs non maximum suppression of bounding boxes based on iou (intersection over union), with an option to pad results.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
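A rough sketch of a padded call; the result is a named tensor map (assumed here to expose <code>selectedIndices</code> and <code>validOutputs</code>), and with <code>padToMaxOutputSize</code> set to true the indices tensor always has <code>maxOutputSize</code> entries:
<pre class="hljs"><code class="hljs language-js">const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0.1, 1, 1.1]]);
const scores = tf.tensor1d([0.9, 0.75]);
const result = tf.image.nonMaxSuppressionPadded(boxes, scores, 3, 0.5, 0, true);
result.selectedIndices.print();   // padded with placeholder indices up to length 3
result.validOutputs.print();      // how many of the returned indices are valid
</code></pre>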
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">padToMaxOutputSize</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to false. If true, size of output <code>selectedIndices</code> is padded to maxOutputSize.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppressionPaddedAsync" href="#image.nonMaxSuppressionPaddedAsync"> tf.image.nonMaxSuppressionPaddedAsync</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?, padToMaxOutputSize?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression_padded_async.ts#L49-L85" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Asynchronously performs non maximum suppression of bounding boxes based on iou (intersection over union), with an option to pad results.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">padToMaxOutputSize</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to false. If true, size of output <code>selectedIndices</code> is padded to maxOutputSize.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppressionWithScore" href="#image.nonMaxSuppressionWithScore"> tf.image.nonMaxSuppressionWithScore</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?, softNmsSigma?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression_with_score.ts#L58-L84" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Performs non maximum suppression of bounding boxes based on iou (intersection over union).</p> <p>This op also supports a Soft-NMS mode (cf. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes, therefore favoring different regions of the image with high scores. To enable this Soft-NMS mode, set the <code>softNmsSigma</code> parameter to be larger than 0.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
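A rough sketch of a Soft-NMS call; the returned map is assumed here to expose <code>selectedIndices</code> and <code>selectedScores</code>, and a positive <code>softNmsSigma</code> decays the scores of overlapping boxes instead of discarding them outright:
<pre class="hljs"><code class="hljs language-js">const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0.1, 1, 1.1]]);
const scores = tf.tensor1d([0.9, 0.75]);
const result = tf.image.nonMaxSuppressionWithScore(boxes, scores, 2, 0.5, 0, 0.5);
result.selectedIndices.print();
result.selectedScores.print();    // scores after the Soft-NMS decay
</code></pre>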
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">softNmsSigma</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the sigma parameter for Soft NMS. When sigma is 0, it falls back to nonMaxSuppression.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.nonMaxSuppressionWithScoreAsync" href="#image.nonMaxSuppressionWithScoreAsync"> tf.image.nonMaxSuppressionWithScoreAsync</a> <span class="signature">(boxes, scores, maxOutputSize, iouThreshold?, scoreThreshold?, softNmsSigma?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/non_max_suppression_with_score_async.ts#L54-L92" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Asynchronously performs non maximum suppression of bounding boxes based on iou (intersection over union).</p> <p>This op also supports a Soft-NMS mode (cf. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes, therefore favoring different regions of the image with high scores. To enable this Soft-NMS mode, set the <code>softNmsSigma</code> parameter to be larger than 0.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">boxes</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 2d tensor of shape <code>[numBoxes, 4]</code>. 
Each entry is <code>[y1, x1, y2, x2]</code>, where <code>(y1, x1)</code> and <code>(y2, x2)</code> are the corners of the bounding box.</span> </li> <li class="parameter"> <span class="param-name">scores</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">a 1d tensor providing the box scores of shape <code>[numBoxes]</code>.</span> </li> <li class="parameter"> <span class="param-name">maxOutputSize</span> <span class="param-type">(number)</span> <span class="param-docs">The maximum number of boxes to be selected.</span> </li> <li class="parameter"> <span class="param-name">iouThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Must be between [0, 1]. Defaults to 0.5 (50% box overlap).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">scoreThreshold</span> <span class="param-type">(number)</span> <span class="param-docs">A threshold for deciding when to remove boxes based on score. Defaults to -inf, which means any score is accepted.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">softNmsSigma</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the sigma parameter for Soft NMS. When sigma is 0, it falls back to nonMaxSuppression.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.resizeBilinear" href="#image.resizeBilinear"> tf.image.resizeBilinear</a> <span class="signature">(images, size, alignCorners?, halfPixelCenters?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/resize_bilinear.ts#L47-L87" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Bilinear resize a single 3D image or a batch of 3D images to a new shape.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">images</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The images, of rank 4 or rank 3, of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">size</span> <span class="param-type">([number, number])</span> <span class="param-docs">The new shape <code>[newHeight, newWidth]</code> to resize the images to. Each channel is resized individually.</span> </li> <li class="parameter"> <span class="param-name">alignCorners</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to <code>false</code>. 
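For example, upsampling a 2x2 single-channel image to 4x4 (a minimal sketch with made-up pixel values):
<pre class="hljs"><code class="hljs language-js">const image = tf.tensor3d([1, 2, 3, 4], [2, 2, 1]);   // a 2x2 single-channel image
tf.image.resizeBilinear(image, [4, 4]).print();       // bilinearly interpolated 4x4 image
</code></pre>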
If true, rescale input by <code>(new_height - 1) / (height - 1)</code>, which exactly aligns the 4 corners of images and resized images. If false, rescale by <code>new_height / height</code>. Treat similarly the width dimension.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">halfPixelCenters</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to <code>false</code>. Whether to assume pixel centers are at 0.5, which would make the floating point coordinates of the top left pixel 0.5, 0.5.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.resizeNearestNeighbor" href="#image.resizeNearestNeighbor"> tf.image.resizeNearestNeighbor</a> <span class="signature">(images, size, alignCorners?, halfPixelCenters?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/resize_nearest_neighbor.ts#L48-L91" target=_blank>Source</a> </span> </div> <div class="documentation"><p>NearestNeighbor resize a batch of 3D images to a new shape.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">images</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The images, of rank 4 or rank 3, of shape <code>[batch, height, width, inChannels]</code>. If rank 3, batch of 1 is assumed.</span> </li> <li class="parameter"> <span class="param-name">size</span> <span class="param-type">([number, number])</span> <span class="param-docs">The new shape <code>[newHeight, newWidth]</code> to resize the images to. Each channel is resized individually.</span> </li> <li class="parameter"> <span class="param-name">alignCorners</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to False. If true, rescale input by <code>(new_height - 1) / (height - 1)</code>, which exactly aligns the 4 corners of images and resized images. If false, rescale by <code>new_height / height</code>. Treat similarly the width dimension.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">halfPixelCenters</span> <span class="param-type">(boolean)</span> <span class="param-docs">Defaults to <code>false</code>. Whether to assume pixels are of half the actual dimensions, and yield more accurate resizes. 
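For comparison, the same 2x2 image upsampled with nearest neighbor sampling simply repeats pixels instead of interpolating (a minimal sketch):
<pre class="hljs"><code class="hljs language-js">const image = tf.tensor3d([1, 2, 3, 4], [2, 2, 1]);
tf.image.resizeNearestNeighbor(image, [4, 4]).print();   // each pixel is repeated, no interpolation
</code></pre>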
This flag would also make the floating point coordinates of the top left pixel 0.5, 0.5.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.rgbToGrayscale" href="#image.rgbToGrayscale"> tf.image.rgbToGrayscale</a> <span class="signature">(image)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/rgb_to_grayscale.ts#L36-L82" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Converts images from RGB format to grayscale.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>| <a href="#class:Tensor">tf.Tensor6D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">An RGB tensor to convert. The <code>image</code>'s last dimension must be size 3 with at least a two-dimensional shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a href="#class:Tensor">tf.Tensor4D</a>|<a href="#class:Tensor">tf.Tensor5D</a>| <a href="#class:Tensor">tf.Tensor6D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.rotateWithOffset" href="#image.rotateWithOffset"> tf.image.rotateWithOffset</a> <span class="signature">(image, radians, fillValue?, center?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/rotate_with_offset.ts#L45-L62" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Rotates the input image tensor counter-clockwise with an optional offset center of rotation. Currently available in the CPU, WebGL, and WASM backends.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">4d tensor of shape <code>[batch, imageHeight, imageWidth, depth]</code>.</span> </li> <li class="parameter"> <span class="param-name">radians</span> <span class="param-type">(number)</span> <span class="param-docs">The amount of rotation, in radians.</span> </li> <li class="parameter"> <span class="param-name">fillValue</span> <span class="param-type">(number|[number, number, number])</span> <span class="param-docs">The value to fill in the empty space left over after rotation. Can be either a single grayscale value (0-255), or an array of three numbers <code>[red, green, blue]</code> specifying the red, green, and blue channels.
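A minimal sketch that rotates a single image by 90 degrees counter-clockwise around its center (pixel values are made up):
<pre class="hljs"><code class="hljs language-js">const image = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);   // one 2x2 single-channel image
tf.image.rotateWithOffset(image, Math.PI / 2).print();   // rotated 90 degrees counter-clockwise
</code></pre>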
Defaults to <code>0</code> (black).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">center</span> <span class="param-type">(number|[number, number])</span> <span class="param-docs">The center of rotation. Can be either a single value (0-1), or an array of two numbers <code>[centerX, centerY]</code>. Defaults to <code>0.5</code> (rotates the image around its center).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="image.transform" href="#image.transform"> tf.image.transform</a> <span class="signature">(image, transforms, interpolation?, fillMode?, fillValue?, outputShape?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/image/transform.ts#L58-L92" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Applies the given transform(s) to the image(s).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor4D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">4d tensor of shape <code>[batch, imageHeight, imageWidth, depth]</code>.</span> </li> <li class="parameter"> <span class="param-name">transforms</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Projective transform matrix/matrices. A tensor1d of length 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the output point (x, y) to a transformed input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k), where k = c0 x + c1 y + 1. The transforms are inverted compared to the transform mapping input points to output points.</span> </li> <li class="parameter"> <span class="param-name">interpolation</span> <span class="param-type">('nearest'|'bilinear')</span> <span class="param-docs">Interpolation mode. Supported values: 'nearest', 'bilinear'. Default to 'nearest'.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">fillMode</span> <span class="param-type">('constant'|'reflect'|'wrap'|'nearest')</span> <span class="param-docs">Points outside the boundaries of the input are filled according to the given mode, one of 'constant', 'reflect', 'wrap', 'nearest'. Default to 'constant'. 'reflect': (d c b a | a b c d | d c b a ) The input is extended by reflecting about the edge of the last pixel. 'constant': (k k k k | a b c d | k k k k) The input is extended by filling all values beyond the edge with the same constant value k. 'wrap': (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge. 
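As a rough sketch of <code>tf.image.transform</code>, the 8-element row <code>[1, 0, 0, 0, 1, 0, 0, 0]</code> is the identity transform (a0 = b1 = 1, everything else 0), so the output equals the input:
<pre class="hljs"><code class="hljs language-js">const image = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
const transforms = tf.tensor2d([[1, 0, 0, 0, 1, 0, 0, 0]]);   // identity: maps (x, y) to (x, y)
tf.image.transform(image, transforms).print();                // same values as the input image
</code></pre>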
'nearest': (a a a a | a b c d | d d d d) The input is extended by the nearest pixel.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">fillValue</span> <span class="param-type">(number)</span> <span class="param-docs">A float representing the value used to fill points outside the boundaries when fillMode is 'constant'.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">outputShape</span> <span class="param-type">([number, number])</span> <span class="param-docs">Output dimensions <code>[height, width]</code> of the transformed image. If not set, the output has the same size as the input image.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor4D</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-RNN" href="#Operations-RNN" class="symbol-link"> Operations / RNN </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="basicLSTMCell" href="#basicLSTMCell"> tf.basicLSTMCell</a> <span class="signature">(forgetBias, lstmKernel, lstmBias, data, c, h)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/basic_lstm_cell.ts#L47-L78" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the next state and output of a BasicLSTMCell.</p> <p>Returns <code>[newC, newH]</code>.</p> <p>Derived from tf.contrib.rnn.BasicLSTMCell.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">forgetBias</span> <span class="param-type">(<a href="#class:Tensor">tf.Scalar</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Forget bias for the cell.</span> </li> <li class="parameter"> <span class="param-name">lstmKernel</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The weights for the cell.</span> </li> <li class="parameter"> <span class="param-name">lstmBias</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The bias for the cell.</span> </li> <li class="parameter"> <span class="param-name">data</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input to the cell.</span> </li> <li class="parameter"> <span class="param-name">c</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Previous cell state.</span> </li> <li class="parameter"> <span class="param-name">h</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank"
rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Previous cell output.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">[<a href="#class:Tensor">tf.Tensor2D</a>, <a href="#class:Tensor">tf.Tensor2D</a>]</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="multiRNNCell" href="#multiRNNCell"> tf.multiRNNCell</a> <span class="signature">(lstmCells, data, c, h)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/multi_rnn_cell.ts#L45-L68" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the next states and outputs of a stack of LSTMCells.</p> <p>Each cell output is used as input to the next cell.</p> <p>Returns <code>[cellState, cellOutput]</code>.</p> <p>Derived from tf.contrib.rnn.MultiRNNCell.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">lstmCells</span> <span class="param-type">((data: <a href="#class:Tensor">tf.Tensor2D</a>, c: <a href="#class:Tensor">tf.Tensor2D</a>, h: <a href="#class:Tensor">tf.Tensor2D</a>): [<a href="#class:Tensor">tf.Tensor2D</a>, <a href="#class:Tensor">tf.Tensor2D</a>][])</span> <span class="param-docs">Array of LSTMCell functions.</span> </li> <li class="parameter"> <span class="param-name">data</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input to the cell.</span> </li> <li class="parameter"> <span class="param-name">c</span> <span class="param-type">(Array)</span> <span class="param-docs">Array of previous cell states.</span> </li> <li class="parameter"> <span class="param-name">h</span> <span class="param-type">(Array)</span> <span class="param-docs">Array of previous cell outputs.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">[<a href="#class:Tensor">tf.Tensor2D</a>[], <a href="#class:Tensor">tf.Tensor2D</a>[]]</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Logical" href="#Operations-Logical" class="symbol-link"> Operations / Logical </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="bitwiseAnd" href="#bitwiseAnd"> tf.bitwiseAnd</a> <span class="signature">(x, y)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/bitwise_and.ts#L48-L64" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Bitwise <code>AND</code> operation for input tensors.</p> <p>Given two input tensors, returns a new tensor with the <code>AND</code> calculated values.</p> <p>The method supports int32 values.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">5</span>, <span class="hljs-number">3</span>, <span class="hljs-number">14</span>], <span
class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">5</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">11</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); tf.<span class="hljs-title function_">bitwiseAnd</span>(x, y).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The input tensor to be calculated.</span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The input tensor to be calculated.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="equal" href="#equal"> tf.equal</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/equal.ts#L43-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a == b) element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">equal</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="greater" href="#greater"> tf.greater</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/greater.ts#L43-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a &gt; b) element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">greater</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="greaterEqual" href="#greaterEqual"> tf.greaterEqual</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/greater_equal.ts#L43-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a &gt;= b) element-wise. 
Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">greaterEqual</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="less" href="#less"> tf.less</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/less.ts#L42-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a &lt; b) element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">less</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. 
Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="lessEqual" href="#lessEqual"> tf.lessEqual</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/less_equal.ts#L43-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a &lt;= b) element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); a.<span class="hljs-title function_">lessEqual</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logicalAnd" href="#logicalAnd"> tf.logicalAnd</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/logical_and.ts#L42-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of <code>a AND b</code> element-wise. 
Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); a.<span class="hljs-title function_">logicalAnd</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor. Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must be of dtype bool.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logicalNot" href="#logicalNot"> tf.logicalNot</a> <span class="signature">(x)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/logical_not.ts#L39-L43" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of <code>NOT x</code> element-wise.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); a.<span class="hljs-title function_">logicalNot</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor. 
Must be of dtype 'bool'.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logicalOr" href="#logicalOr"> tf.logicalOr</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/logical_or.ts#L41-L49" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of <code>a OR b</code> element-wise. Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); a.<span class="hljs-title function_">logicalOr</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor. Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must be of dtype bool.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="logicalXor" href="#logicalXor"> tf.logicalXor</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/logical_xor.ts#L43-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of <code>a XOR b</code> element-wise. 
Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); a.<span class="hljs-title function_">logicalXor</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor. Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must be of dtype bool.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="notEqual" href="#notEqual"> tf.notEqual</a> <span class="signature">(a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/not_equal.ts#L42-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the truth value of (a != b) element-wise. 
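</p> <p>The operands do not need to share a shape; as an illustrative sketch, a scalar is broadcast against a vector:</p> <pre><code class="language-javascript">// Illustrative sketch: the scalar 2 is broadcast over the vector.
const a = tf.tensor1d([1, 2, 3]);
a.notEqual(tf.scalar(2)).print();
// expected: [true, false, true]
</code></pre> <p>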
Supports broadcasting.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); a.<span class="hljs-title function_">notEqual</span>(b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The first input tensor.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The second input tensor. Must have the same dtype as <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="where" href="#where"> tf.where</a> <span class="signature">(condition, a, b)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/where.ts#L52-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the elements, either <code>a</code> or <code>b</code> depending on the <code>condition</code>.</p> <p>If the condition is true, select from <code>a</code>, otherwise select from <code>b</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cond = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span> , <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, -<span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); a.<span class="hljs-title function_">where</span>(cond, b).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">condition</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input condition. 
Must be of dtype bool.</span> </li> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">If <code>condition</code> is rank 1, <code>a</code> may have a higher rank but its first dimension must match the size of <code>condition</code>.</span> </li> <li class="parameter"> <span class="param-name">b</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor with the same dtype as <code>a</code> and with shape that is compatible with <code>a</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="whereAsync" href="#whereAsync"> tf.whereAsync</a> <span class="signature">(condition)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/where_async.ts#L42-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the coordinates of true elements of condition.</p> <p>The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in input. Indices are output in row-major order. 
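</p> <p>As an illustrative sketch with a rank-2 condition, each output row holds the [row, column] coordinates of one true element:</p> <pre><code class="language-javascript">// Illustrative sketch (assumed output): coordinates are listed in row-major order.
const cond = tf.tensor2d([[true, false], [false, true]], [2, 2], 'bool');
const coords = await tf.whereAsync(cond);
coords.print();
// expected:
// [[0, 0],
//  [1, 1]]
</code></pre> <p>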
The resulting tensor has the shape <code>[numTrueElems, condition.rank]</code>.</p> <p>This is analogous to calling the python <code>tf.where(cond)</code> without an x or y.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> cond = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-literal">false</span>, <span class="hljs-literal">false</span>, <span class="hljs-literal">true</span>], <span class="hljs-string">&#x27;bool&#x27;</span>); <span class="hljs-keyword">const</span> result = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">whereAsync</span>(cond); result.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">condition</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor2D</a>&gt;</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Scan" href="#Operations-Scan" class="symbol-link"> Operations / Scan </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="cumprod" href="#cumprod"> tf.cumprod</a> <span class="signature">(x, axis?, exclusive?, reverse?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/cumprod.ts#L51-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the cumulative product of a <a href="#class:Tensor">tf.Tensor</a> along <code>axis</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">cumprod</span>().<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); x.<span class="hljs-title function_">cumprod</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a> | <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to cumulatively multiply.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis along which to multiply. Optional. 
Defaults to 0.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">exclusive</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to perform exclusive cumulative product. Optional. Defaults to false. If set to true then the product of each tensor entry does not include its own value, but only the values previous to it along the specified axis.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reverse</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to multiply in the opposite direction. Optional. Defaults to false.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="cumsum" href="#cumsum"> tf.cumsum</a> <span class="signature">(x, axis?, exclusive?, reverse?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/cumsum.ts#L51-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the cumulative sum of a <a href="#class:Tensor">tf.Tensor</a> along <code>axis</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); x.<span class="hljs-title function_">cumsum</span>().<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); x.<span class="hljs-title function_">cumsum</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The input tensor to be summed.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis along which to sum. Optional. Defaults to 0.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">exclusive</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to perform exclusive cumulative sum. Optional. Defaults to false. If set to true then the sum of each tensor entry does not include its own value, but only the values previous to it along the specified axis.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reverse</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to sum in the opposite direction. Optional. 
Defaults to false.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Evaluation" href="#Operations-Evaluation" class="symbol-link"> Operations / Evaluation </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="confusionMatrix" href="#confusionMatrix"> tf.confusionMatrix</a> <span class="signature">(labels, predictions, numClasses)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/confusion_matrix.ts#L58-L94" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the confusion matrix from true labels and predicted labels.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> labels = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> predictions = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> numClasses = <span class="hljs-number">3</span>; <span class="hljs-keyword">const</span> out = tf.<span class="hljs-property">math</span>.<span class="hljs-title function_">confusionMatrix</span>(labels, predictions, numClasses); out.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// Expected output matrix:</span> <span class="hljs-comment">// [[2, 0, 0],</span> <span class="hljs-comment">// [0, 1, 1],</span> <span class="hljs-comment">// [0, 0, 1]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The target labels, assumed to be 0-based integers for the classes. The shape is <code>[numExamples]</code>, where <code>numExamples</code> is the number of examples included.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted classes, assumed to be 0-based integers for the classes. Must have the same shape as <code>labels</code>.</span> </li> <li class="parameter"> <span class="param-name">numClasses</span> <span class="param-type">(number)</span> <span class="param-docs">Number of all classes, as an integer. 
Its value must be larger than the largest element in <code>labels</code> and <code>predictions</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="inTopKAsync" href="#inTopKAsync"> tf.inTopKAsync</a> <span class="signature">(predictions, targets, k?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/in_top_k.ts#L41-L101" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns whether the targets are in the top K predictions.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> predictions = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">20</span>, <span class="hljs-number">10</span>, <span class="hljs-number">40</span>, <span class="hljs-number">30</span>], [<span class="hljs-number">30</span>, <span class="hljs-number">50</span>, -<span class="hljs-number">20</span>, <span class="hljs-number">10</span>]]); <span class="hljs-keyword">const</span> targets = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">0</span>]); <span class="hljs-keyword">const</span> precision = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">inTopKAsync</span>(predictions, targets); precision.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">2-D or higher <a href="#class:Tensor">tf.Tensor</a> with last dimension being at least <code>k</code>.</span> </li> <li class="parameter"> <span class="param-name">targets</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1-D or higher <a href="#class:Tensor">tf.Tensor</a>.</span> </li> <li class="parameter"> <span class="param-name">k</span> <span class="param-type">(number)</span> <span class="param-docs">Optional Number of top elements to look at for computing precision, default to 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="lowerBound" href="#lowerBound"> tf.lowerBound</a> <span class="signature">(sortedSequence, values)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/lower_bound.ts#L69-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Searches for where a value would go in a sorted sequence.</p> <p>This is not a method for checking containment (like javascript 
in).</p> <p>The typical use case for this operation is &quot;binning&quot;, &quot;bucketing&quot;, or &quot;discretizing&quot;. The values are assigned to bucket-indices based on the edges listed in 'sortedSequence'. This operation returns the bucket-index for each value.</p> <p>The index returned corresponds to the first edge greater than or equal to the value.</p> <p>The axis is not settable for this operation. It always operates on the innermost dimension (axis=-1). The operation will accept any number of outer dimensions.</p> <p>Note: This operation assumes that 'lowerBound' is sorted along the innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not sorted no error is raised and the content of the returned tensor is not well defined.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> edges = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">3.3</span>, <span class="hljs-number">9.1</span>, <span class="hljs-number">10.0</span>]); <span class="hljs-keyword">let</span> values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0.0</span>, <span class="hljs-number">4.1</span>, <span class="hljs-number">12.0</span>]); <span class="hljs-keyword">const</span> result1 = tf.<span class="hljs-title function_">lowerBound</span>(edges, values); result1.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 2, 4]</span> <span class="hljs-keyword">const</span> seq = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">3</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">10</span>]); values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">4</span>, <span class="hljs-number">10</span>]); <span class="hljs-keyword">const</span> result2 = tf.<span class="hljs-title function_">lowerBound</span>(seq, values); result2.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 2, 3]</span> <span class="hljs-keyword">const</span> sortedSequence = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0.</span>, <span class="hljs-number">3.</span>, <span class="hljs-number">8.</span>, <span class="hljs-number">9.</span>, <span class="hljs-number">10.</span>], [<span class="hljs-number">1.</span>, <span class="hljs-number">2.</span>, <span class="hljs-number">3.</span>, <span class="hljs-number">4.</span>, <span class="hljs-number">5.</span>]]); values = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">9.8</span>, <span class="hljs-number">2.1</span>, <span class="hljs-number">4.3</span>], [<span class="hljs-number">0.1</span>, <span class="hljs-number">6.6</span>, <span class="hljs-number">4.5</span>, ]]); <span class="hljs-keyword">const</span> result3 = tf.<span class="hljs-title function_">lowerBound</span>(sortedSequence, values); result3.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[4, 1, 2], [0, 5, 4]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sortedSequence</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" 
href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. Sorted sequence.</span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. Search values.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="searchSorted" href="#searchSorted"> tf.searchSorted</a> <span class="signature">(sortedSequence, values, side?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/search_sorted.ts#L80-L114" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Searches for where a value would go in a sorted sequence.</p> <p>This is not a method for checking containment (like javascript in).</p> <p>The typical use case for this operation is &quot;binning&quot;, &quot;bucketing&quot;, or &quot;discretizing&quot;. The values are assigned to bucket-indices based on the edges listed in 'sortedSequence'. This operation returns the bucket-index for each value.</p> <p>The side argument controls which index is returned if a value lands exactly on an edge.</p> <p>The axis is not settable for this operation. It always operates on the innermost dimension (axis=-1). The operation will accept any number of outer dimensions.</p> <p>Note: This operation assumes that 'sortedSequence' is sorted along the innermost axis, maybe using 'sort(..., axis=-1)'. 
If the sequence is not sorted no error is raised and the content of the returned tensor is not well defined.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> edges = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, <span class="hljs-number">3.3</span>, <span class="hljs-number">9.1</span>, <span class="hljs-number">10.0</span>]); <span class="hljs-keyword">let</span> values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0.0</span>, <span class="hljs-number">4.1</span>, <span class="hljs-number">12.0</span>]); <span class="hljs-keyword">const</span> result1 = tf.<span class="hljs-title function_">searchSorted</span>(edges, values, <span class="hljs-string">&#x27;left&#x27;</span>); result1.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 2, 4]</span> <span class="hljs-keyword">const</span> seq = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">3</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">10</span>]); values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">4</span>, <span class="hljs-number">10</span>]); <span class="hljs-keyword">const</span> result2 = tf.<span class="hljs-title function_">searchSorted</span>(seq, values, <span class="hljs-string">&#x27;left&#x27;</span>); result2.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 2, 3]</span> <span class="hljs-keyword">const</span> result3 = tf.<span class="hljs-title function_">searchSorted</span>(seq, values, <span class="hljs-string">&#x27;right&#x27;</span>); result3.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 2, 5]</span> <span class="hljs-keyword">const</span> sortedSequence = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0.</span>, <span class="hljs-number">3.</span>, <span class="hljs-number">8.</span>, <span class="hljs-number">9.</span>, <span class="hljs-number">10.</span>], [<span class="hljs-number">1.</span>, <span class="hljs-number">2.</span>, <span class="hljs-number">3.</span>, <span class="hljs-number">4.</span>, <span class="hljs-number">5.</span>]]); values = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">9.8</span>, <span class="hljs-number">2.1</span>, <span class="hljs-number">4.3</span>], [<span class="hljs-number">0.1</span>, <span class="hljs-number">6.6</span>, <span class="hljs-number">4.5</span>, ]]); <span class="hljs-keyword">const</span> result4 = tf.<span class="hljs-title function_">searchSorted</span>(sortedSequence, values, <span class="hljs-string">&#x27;left&#x27;</span>); result4.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[4, 1, 2], [0, 5, 4]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sortedSequence</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. 
Sorted sequence.</span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. Search values.</span> </li> <li class="parameter"> <span class="param-name">side</span> <span class="param-type">('left'|'right')</span> <span class="param-docs">: 'left'|'right'. Defaults to 'left'. 'left' corresponds to lower bound and 'right' to upper bound.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="topk" href="#topk"> tf.topk</a> <span class="signature">(x, k?, sorted?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/topk.ts#L52-L78" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Finds the values and indices of the <code>k</code> largest entries along the last dimension.</p> <p>If the input is a vector (rank=1), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus values[j] is the j-th largest entry in input, and its index is indices[j]. For higher rank inputs, computes the top k entries along the last dimension.</p> <p>If two elements are equal, the lower-index element appears first.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">5</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">3</span>]]); <span class="hljs-keyword">const</span> {values, indices} = tf.<span class="hljs-title function_">topk</span>(a); values.<span class="hljs-title function_">print</span>(); indices.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">1-D or higher <a href="#class:Tensor">tf.Tensor</a> with last dimension being at least <code>k</code>.</span> </li> <li class="parameter"> <span class="param-name">k</span> <span class="param-type">(number)</span> <span class="param-docs">Number of top elements to look for along the last dimension.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">sorted</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, the resulting <code>k</code> elements will be sorted by the values in descending order.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{values: <a href="#class:Tensor">tf.Tensor</a>, indices: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" 
name="unique" href="#unique"> tf.unique</a> <span class="signature">(x, axis?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/unique.ts#L78-L89" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Finds unique elements along an axis of a tensor.</p> <p>It returns a tensor <code>values</code> containing all of the unique elements along the <code>axis</code> of the given tensor <code>x</code> in the same order that they occur along the <code>axis</code> in <code>x</code>; <code>x</code> does not need to be sorted. It also returns a tensor <code>indices</code> the same size as the number of the elements in <code>x</code> along the <code>axis</code> dimension. It contains the index in the unique output <code>values</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// A 1-D tensor</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>, <span class="hljs-number">4</span>, <span class="hljs-number">4</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>]); <span class="hljs-keyword">const</span> {values, indices} = tf.<span class="hljs-title function_">unique</span>(a); values.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 2, 4, 7, 8,]</span> indices.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 0, 1, 2, 2, 2, 3, 4, 4]</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// A 2-D tensor with axis=0</span> <span class="hljs-comment">//</span> <span class="hljs-comment">// &#x27;a&#x27; is: [[1, 0, 0],</span> <span class="hljs-comment">// [1, 0, 0],</span> <span class="hljs-comment">// [2, 0, 0]]</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]); <span class="hljs-keyword">const</span> {values, indices} = tf.<span class="hljs-title function_">unique</span>(a, <span class="hljs-number">0</span>) values.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[1, 0, 0],</span> <span class="hljs-comment">// [2, 0, 0]]</span> indices.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 0, 1]</span> </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// A 2-D tensor with axis=1</span> <span class="hljs-comment">//</span> <span class="hljs-comment">// &#x27;a&#x27; is: [[1, 0, 0],</span> <span class="hljs-comment">// [1, 0, 0],</span> <span class="hljs-comment">// [2, 0, 0]]</span> <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span 
class="hljs-number">2</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]]); <span class="hljs-keyword">const</span> {values, indices} = tf.<span class="hljs-title function_">unique</span>(a, <span class="hljs-number">1</span>) values.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[1, 0],</span> <span class="hljs-comment">// [1, 0],</span> <span class="hljs-comment">// [2, 0]]</span> indices.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 1, 1]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor (int32, string, bool).</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The axis of the tensor to find the unique elements.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{values: <a href="#class:Tensor">tf.Tensor</a>, indices: <a href="#class:Tensor">tf.Tensor1D</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="upperBound" href="#upperBound"> tf.upperBound</a> <span class="signature">(sortedSequence, values)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/upper_bound.ts#L56-L59" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Searches for where a value would go in a sorted sequence.</p> <p>This is not a method for checking containment (like javascript in).</p> <p>The typical use case for this operation is &quot;binning&quot;, &quot;bucketing&quot;, or &quot;discretizing&quot;. The values are assigned to bucket-indices based on the edges listed in 'sortedSequence'. This operation returns the bucket-index for each value.</p> <p>The index returned corresponds to the first edge greater than the value.</p> <p>The axis is not settable for this operation. It always operates on the innermost dimension (axis=-1). The operation will accept any number of outer dimensions.</p> <p>Note: This operation assumes that 'upperBound' is sorted along the innermost axis, maybe using 'sort(..., axis=-1)'. 
If the sequence is not sorted no error is raised and the content of the returned tensor is not well defined.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> seq = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">3</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">10</span>]); <span class="hljs-keyword">const</span> values = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">4</span>, <span class="hljs-number">10</span>]); <span class="hljs-keyword">const</span> result = tf.<span class="hljs-title function_">upperBound</span>(seq, values); result.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [1, 2, 5]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">sortedSequence</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. Sorted sequence.</span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: N-D. Search values.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Slicing and Joining" href="#Operations-Slicing and Joining" class="symbol-link"> Operations / Slicing and Joining </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="gatherND" href="#gatherND"> tf.gatherND</a> <span class="signature">(x, indices)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/gather_nd.ts#L63-L70" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Gather slices from input tensor into a Tensor with shape specified by <code>indices</code>.</p> <p><code>indices</code> is a K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into input, where each element defines a slice of input: output[\(i_0, ..., i_{K-2}\)] = input[indices[\(i_0, ..., i_{K-2}\)]]</p> <p>Whereas in <a href="#gather">tf.gather()</a>, <code>indices</code> defines slices into the first dimension of input, in <a href="#gatherND">tf.gatherND()</a>, <code>indices</code> defines slices into the first N dimensions of input, where N = indices.shape[-1].</p> <p>The last dimension of indices can be at most the rank of input: indices.shape[-1] &lt;= input.rank</p> <p>The last dimension of <code>indices</code> corresponds to elements (if indices.shape[-1] == input.rank) or slices (if indices.shape[-1] &lt; input.rank) along dimension indices.shape[-1] of input. 
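</p> <p>As an illustrative sketch of the slice case, indices with a last dimension of 1 gather whole rows of a rank-2 input:</p> <pre><code class="language-javascript">// Illustrative sketch: indices.shape[-1] is 1, which is smaller than
// input.rank (2), so each index selects an entire row.
const input = tf.tensor2d([9, 10, 11, 12], [2, 2]);
const indices = tf.tensor2d([1, 0], [2, 1], 'int32');
tf.gatherND(input, indices).print();
// expected:
// [[11, 12],
//  [9, 10]]
</code></pre> <p>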
The output tensor has shape indices.shape[:-1] + input.shape[indices.shape[-1]:]</p> <p>Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>,<span class="hljs-number">2</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); tf.<span class="hljs-title function_">gatherND</span>(input, indices).<span class="hljs-title function_">print</span>() <span class="hljs-comment">// [10, 11]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor from which to gather values.</span> </li> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Index tensor, must be of type int32.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="meshgrid" href="#meshgrid"> tf.meshgrid</a> <span class="signature">(x?, y?, __2?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/meshgrid.ts#L57-L93" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Broadcasts parameters for evaluation on an N-D grid.</p> <p>Given N one-dimensional coordinate arrays <code>*args</code>, returns a list <code>outputs</code> of N-D coordinate arrays for evaluating expressions on an N-D grid.</p> <p>Notes: <code>meshgrid</code> supports cartesian ('xy') and matrix ('ij') indexing conventions. When the <code>indexing</code> argument is set to 'xy' (the default), the broadcasting instructions for the first two dimensions are swapped. 
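</p> <p>As an illustrative sketch of the matrix ('ij') convention (the default 'xy' case is shown in the example that follows):</p> <pre><code class="language-javascript">// Illustrative sketch: with {indexing: 'ij'} the outputs are the transpose of
// the default 'xy' results for 1-D inputs.
const x = [1, 2, 3];
const y = [4, 5, 6];
const [X, Y] = tf.meshgrid(x, y, {indexing: 'ij'});
// expected:
// X = [[1, 1, 1],
//      [2, 2, 2],
//      [3, 3, 3]]
// Y = [[4, 5, 6],
//      [4, 5, 6],
//      [4, 5, 6]]
</code></pre> <p>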
Examples: Calling <code>const [X, Y] = meshgrid(x, y)</code> with the tensors</p> <pre><code class="language-javascript">const x = [1, 2, 3];
const y = [4, 5, 6];
const [X, Y] = tf.meshgrid(x, y);
// X = [[1, 2, 3],
//      [1, 2, 3],
//      [1, 2, 3]]
// Y = [[4, 4, 4],
//      [5, 5, 5],
//      [6, 6, 6]]
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor with rank &gt;= 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">y</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor with rank &gt;= 1.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">__2</span> <span class="param-type">({ indexing?: string; })</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="scatterND" href="#scatterND"> tf.scatterND</a> <span class="signature">(indices, updates, shape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/scatter_nd.ts#L49-L64" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new tensor by applying sparse updates to individual values or slices within a zero tensor of the given shape, according to indices. 
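</p> <p>As an illustrative sketch, indices that address only the first dimension write whole rows of the output; rows that are not referenced stay zero:</p> <pre><code class="language-javascript">// Illustrative sketch: one [1, 1] index and one [1, 4] update fill row 0 of a
// [2, 4] zero tensor.
const indices = tf.tensor2d([[0]], [1, 1], 'int32');
const updates = tf.tensor2d([[5, 6, 7, 8]], [1, 4]);
tf.scatterND(indices, updates, [2, 4]).print();
// expected:
// [[5, 6, 7, 8],
//  [0, 0, 0, 0]]
</code></pre> <p>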
This operator is the inverse of the <a href="#gatherND">tf.gatherND()</a> operator which extracts values or slices from a given tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">1</span>, <span class="hljs-number">7</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> updates = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>]); <span class="hljs-keyword">const</span> shape = [<span class="hljs-number">8</span>]; tf.<span class="hljs-title function_">scatterND</span>(indices, updates, shape).<span class="hljs-title function_">print</span>() <span class="hljs-comment">//[0, 11, 0, 10, 9, 0, 0, 12]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor contains the indices into the output tensor.</span> </li> <li class="parameter"> <span class="param-name">updates</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor contains the value for the indices.</span> </li> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs">: The shape of the output tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="stridedSlice" href="#stridedSlice"> tf.stridedSlice</a> <span class="signature">(x, begin, end, strides?, beginMask?, endMask?, ellipsisMask?, newAxisMask?, shrinkAxisMask?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/strided_slice.ts#L61-L82" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Extracts a strided slice of a tensor.</p> <p>Roughly speaking, this op extracts a slice of size (end-begin)/stride from the given input tensor (x). Starting at the location specified by begin the slice continues by adding stride to the index until all dimensions are not less than end. 
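</p> <p>As an illustrative sketch, a stride larger than one skips elements along its dimension (here every other entry of the first axis is kept):</p> <pre><code class="language-javascript">// Illustrative sketch: strides[0] = 2 keeps indices 0 and 2 of the first axis.
const t = tf.tensor3d([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
                      [3, 2, 3]);
t.stridedSlice([0, 0, 0], [3, 2, 3], [2, 1, 1]).print();
// expected:
// [[[1, 1, 1],
//   [2, 2, 2]],
//  [[5, 5, 5],
//   [6, 6, 6]]]
</code></pre> <p>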
Note that a stride can be negative, which causes a reverse slice.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> t = tf.<span class="hljs-title function_">tensor3d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span> ,<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">4</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">5</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">6</span>, <span class="hljs-number">6</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); t.<span class="hljs-title function_">stridedSlice</span>([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">1</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>() <span class="hljs-comment">// [[[3, 3, 3]]]</span> t.<span class="hljs-title function_">stridedSlice</span>([<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>() <span class="hljs-comment">// [[[3, 3, 3],</span> <span class="hljs-comment">// [4, 4, 4]]]</span> t.<span class="hljs-title function_">stridedSlice</span>([<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, -<span class="hljs-number">3</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">1</span>]).<span class="hljs-title function_">print</span>() <span class="hljs-comment">// [[[4, 4, 4],</span> <span class="hljs-comment">// [3, 3, 3]]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to stride slice.</span> </li> <li class="parameter"> <span class="param-name">begin</span> <span class="param-type">(number[])</span> <span class="param-docs">The coordinates to start the slice from.</span> </li> <li class="parameter"> <span class="param-name">end</span> <span class="param-type">(number[])</span> <span class="param-docs">: The coordinates to end the slice at.</span> </li> <li class="parameter"> <span class="param-name">strides</span> <span class="param-type">(number[])</span> <span class="param-docs">: The size of the slice.</span> <span class="chip"> Optional </span> </li> 
<li class="parameter"> <span class="param-name">beginMask</span> <span class="param-type">(number)</span> <span class="param-docs">: If the ith bit of beginMask is set, begin[i] is ignored and the fullest possible range in that dimension is used instead.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">endMask</span> <span class="param-type">(number)</span> <span class="param-docs">: If the ith bit of endMask is set, end[i] is ignored and the fullest possible range in that dimension is used instead.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">ellipsisMask</span> <span class="param-type">(number)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">newAxisMask</span> <span class="param-type">(number)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">shrinkAxisMask</span> <span class="param-type">(number)</span> <span class="param-docs">: a bitmask where bit i implies that the ith specification should shrink the dimensionality. begin and end must imply a slice of size 1 in the dimension.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tensorScatterUpdate" href="#tensorScatterUpdate"> tf.tensorScatterUpdate</a> <span class="signature">(tensor, indices, updates)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/tensor_scatter_update.ts#L60-L85" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new tensor by applying sparse updates to individual values or slices to the passed in tensor according to indices. 
This operator is similar to the <a href="#scatterND">tf.scatterND()</a> op, except that the updates are scattered onto an existing tensor (as opposed to a zero tensor).</p> <p>If indices contains duplicates, then we pick the last update for the index.</p> <p>If an out of bound index is found on CPU, an error is returned.</p> <p>Warning: There are some GPU specific semantics for this operation.</p> <ul> <li>If an out of bound index is found, the index is ignored.</li> <li>The order in which updates are applied is nondeterministic, so the output will be nondeterministic if indices contains duplicates.</li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> shape = [<span class="hljs-number">8</span>]; <span class="hljs-keyword">const</span> tensor = tf.<span class="hljs-title function_">ones</span>(shape); <span class="hljs-keyword">const</span> indices = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">1</span>, <span class="hljs-number">7</span>], [<span class="hljs-number">4</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> updates = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">9</span>, <span class="hljs-number">10</span>, <span class="hljs-number">11</span>, <span class="hljs-number">12</span>]); tf.<span class="hljs-title function_">tensorScatterUpdate</span>(tensor, indices, updates).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">//[1, 11, 1, 10, 9, 1, 1, 12]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">tensor</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A Tensor.
Tensor to copy/update.</span> </li> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor containing the indices into the output tensor; it must have at least 2 axes: (num_updates, index_depth).</span> </li> <li class="parameter"> <span class="param-name">updates</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A tensor containing the values for the indices.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Ragged" href="#Operations-Ragged" class="symbol-link"> Operations / Ragged </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="raggedTensorToTensor" href="#raggedTensorToTensor"> tf.raggedTensorToTensor</a> <span class="signature">(shape, values, defaultValue, rowPartitionTensors, rowPartitionTypes)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/ragged_tensor_to_tensor.ts#L78-L100" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a dense tensor from a ragged tensor, possibly altering its shape.</p> <p>The raggedTensorToTensor op creates a dense tensor from an array of row partition tensors, a value vector, and default values. If the shape is unspecified, the minimal shape required to contain all the elements in the ragged tensor (the natural shape) will be used. If some dimensions are left unspecified, then the size of the natural shape is used in that dimension.</p> <p>The defaultValue will be broadcast to the output shape. After that, the values from the ragged tensor overwrite the default values. Note that the defaultValue must have fewer dimensions than the value tensor.</p> <p>The row partition tensors are in the order of the dimensions. At present, the types can be: &quot;ROW_SPLITS&quot;: the row_splits tensor from the ragged tensor. &quot;VALUE_ROWIDS&quot;: the value_rowids tensor from the ragged tensor. &quot;FIRST_DIM_SIZE&quot;: if value_rowids is used for the first dimension, then it is preceded by &quot;FIRST_DIM_SIZE&quot;.</p>
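<p>An illustrative sketch (an assumed example, not from the original reference): it builds a dense <code>[4, 4]</code> tensor from a ragged tensor whose rows are <code>[1, 2, 3]</code>, <code>[]</code>, <code>[4, 5, 6, 7]</code> and <code>[8, 9]</code>, using a &quot;ROW_SPLITS&quot; partition and padding with a default value of 0.</p> <pre><code class="language-js">// Hypothetical sketch: convert ragged rows to a dense [4, 4] tensor.
const result = tf.raggedTensorToTensor(
    tf.tensor1d([4, 4], 'int32'),              // desired output shape
    tf.tensor1d([1, 2, 3, 4, 5, 6, 7, 8, 9]),  // flat values of the ragged tensor
    tf.scalar(0),                              // default (padding) value
    [tf.tensor1d([0, 3, 3, 7, 9], 'int32')],   // row partition tensors
    ['ROW_SPLITS']);                           // row partition types
result.print();
// Expected to print:
// [[1, 2, 3, 0],
//  [0, 0, 0, 0],
//  [4, 5, 6, 7],
//  [8, 9, 0, 0]]
</code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor. Must be one of the following types: 'int32'. The desired shape of the output tensor. If left unspecified (empty), the minimal shape required to contain all the elements in the ragged tensor (the natural shape) will be used.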
If some dimensions are left unspecified, then the size of the natural shape is used in that dimension.</p> <p>Note that dense dimensions cannot be modified by the shape argument. Trying to change the size of a dense dimension will cause the op to fail. Examples: natural shape: [4, 5, 6] shape: -1 output shape: [4, 5, 6]</p> <p>natural shape: [4, 5, 6] shape: [3, -1, 2] output shape: [3, 5, 2]</p> <p>natural shape: [4, 5, 6] shape: [3, 7, 2] output shape: [3, 7, 2]</span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor. A 1D tensor representing the values of the ragged tensor.</span> </li> <li class="parameter"> <span class="param-name">defaultValue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor. Must have the same type as values. The defaultValue when the shape is larger than the ragged tensor. The defaultValue is broadcast until it is the shape of the output tensor, and then overwritten by values in the ragged tensor. The default value must be compatible with this broadcast operation, and must have fewer dimensions than the value tensor.</span> </li> <li class="parameter"> <span class="param-name">rowPartitionTensors</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>[])</span> <span class="param-docs">: A list of at least 1 Tensor objects with the same type in: 'int32'.</span> </li> <li class="parameter"> <span class="param-name">rowPartitionTypes</span> <span class="param-type">(string[])</span> <span class="param-docs">: A list of strings. The types of the row partition tensors. At present, these can be: &quot;ROW_SPLITS&quot;: the row_splits tensor from the ragged tensor. &quot;VALUE_ROWIDS&quot;: the value_rowids tensor from the ragged tensor. &quot;FIRST_DIM_SIZE&quot;: if value_rowids is used for the first dimension, then it is preceded by &quot;FIRST_DIM_SIZE&quot;. 
The tensors are in the order of the dimensions.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Spectral" href="#Operations-Spectral" class="symbol-link"> Operations / Spectral </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="spectral.fft" href="#spectral.fft"> tf.spectral.fft</a> <span class="signature">(input)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/spectral/fft.ts#L42-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Fast Fourier transform.</p> <p>Computes the 1-dimensional discrete Fourier transform over the inner-most dimension of input.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> real = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> imag = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">complex</span>(real, imag); x.<span class="hljs-title function_">fft</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// tf.spectral.fft(x).print();</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The complex input to compute an fft over.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="spectral.ifft" href="#spectral.ifft"> tf.spectral.ifft</a> <span class="signature">(input)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/spectral/ifft.ts#L42-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Inverse fast Fourier transform.</p> <p>Computes the inverse 1-dimensional discrete Fourier transform over the inner-most dimension of input.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> real = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> imag = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">complex</span>(real, imag); x.<span class="hljs-title function_">ifft</span>().<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// tf.spectral.ifft(x).print();</span> </code></pre> </div> <div 
class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The complex input to compute an ifft over.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="spectral.irfft" href="#spectral.irfft"> tf.spectral.irfft</a> <span class="signature">(input)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/spectral/irfft.ts#L49-L84" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Inversed real value input fast Fourier transform.</p> <p>Computes the 1-dimensional inversed discrete Fourier transform over the inner-most dimension of the real input.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> real = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> imag = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">complex</span>(real, imag); x.<span class="hljs-title function_">irfft</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The real value input to compute an irfft over.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="spectral.rfft" href="#spectral.rfft"> tf.spectral.rfft</a> <span class="signature">(input, fftLength?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/spectral/rfft.ts#L48-L97" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Real value input fast Fourier transform.</p> <p>Computes the 1-dimensional discrete Fourier transform over the inner-most dimension of the real input.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> real = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); real.<span class="hljs-title function_">rfft</span>().<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The real value input to compute an rfft over.</span> </li> <li class="parameter"> <span class="param-name">fftLength</span> <span 
class="param-type">(number)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Segment" href="#Operations-Segment" class="symbol-link"> Operations / Segment </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="unsortedSegmentSum" href="#unsortedSegmentSum"> tf.unsortedSegmentSum</a> <span class="signature">(x, segmentIds, numSegments)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/unsorted_segment_sum.ts#L47-L60" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the sum along segments of a <a href="#class:Tensor">tf.Tensor</a>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> segmentIds = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>); <span class="hljs-keyword">const</span> numSegments = <span class="hljs-number">3</span>; x.<span class="hljs-title function_">unsortedSegmentSum</span>(segmentIds, numSegments).<span class="hljs-title function_">print</span>() <span class="hljs-comment">//or tf.unsortedSegmentSum(x, segmentIds, numSegments)</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The <a href="#class:Tensor">tf.Tensor</a> that will be summed along its segments.</span> </li> <li class="parameter"> <span class="param-name">segmentIds</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A <a href="#class:Tensor">tf.Tensor1D</a> whose rank is equal to the rank of <code>x</code>'s dimension along the <code>axis</code>. 
Maps each element of <code>x</code> to a segment.</span> </li> <li class="parameter"> <span class="param-name">numSegments</span> <span class="param-type">(number)</span> <span class="param-docs">The number of distinct <code>segmentIds</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Moving Average" href="#Operations-Moving Average" class="symbol-link"> Operations / Moving Average </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="movingAverage" href="#movingAverage"> tf.movingAverage</a> <span class="signature">(v, x, decay, step?, zeroDebias?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/moving_average.ts#L60-L82" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the moving average of a variable.</p> <p>Without zeroDebias, the moving average operation is defined by: <code>v += delta</code> where <code>delta = (1 - decay) * (x - v)</code></p> <p>With zeroDebias (default), the <code>delta</code> term is scaled to debias the effect of the (assumed) zero-initialization of <code>v</code>. <code>delta /= (1 - decay ^ step)</code></p> <p>For more details on the zero-debiasing algorithm, see: https://arxiv.org/abs/1412.6980</p> <p>Note that this function is completely stateless and does not keep track of step count. The step count needs to be maintained by the caller and passed in as <code>step</code>.</p> </div>
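<p>A minimal usage sketch (an assumed example, not from the original reference), where the caller keeps both the running average and the step count and feeds in one observation at a time:</p> <pre><code class="language-js">// Hypothetical sketch of a zero-debiased exponential moving average loop.
const decay = 0.99;
let v = tf.zeros([3]);                           // running average, zero-initialized
const observations = [tf.tensor1d([1, 2, 3]), tf.tensor1d([2, 3, 4])];
observations.forEach((x, i) => {
  const step = i + 1;                            // step count maintained by the caller
  v = tf.movingAverage(v, x, decay, step);       // zeroDebias is true by default
});
v.print();
</code></pre> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">v</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The current moving average value.</span> </li> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">New input value, must have the same shape and dtype as <code>v</code>.</span> </li> <li class="parameter"> <span class="param-name">decay</span> <span class="param-type">(number|<a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">The decay factor.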
Typical values are 0.95 and 0.99.</span> </li> <li class="parameter"> <span class="param-name">step</span> <span class="param-type">(number|<a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">Step count.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">zeroDebias</span> <span class="param-type">(boolean)</span> <span class="param-docs">: Whether zeroDebias is to be performed (default: <code>true</code>).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Dropout" href="#Operations-Dropout" class="symbol-link"> Operations / Dropout </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="dropout" href="#dropout"> tf.dropout</a> <span class="signature">(x, rate, noiseShape?, seed?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/dropout.ts#L53-L77" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes dropout.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>]); <span class="hljs-keyword">const</span> rate = <span class="hljs-number">0.75</span>; <span class="hljs-keyword">const</span> output = tf.<span class="hljs-title function_">dropout</span>(x, rate); output.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A floating point Tensor or TensorLike.</span> </li> <li class="parameter"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">A float in the range [0, 1). The probability that each element of x is discarded.</span> </li> <li class="parameter"> <span class="param-name">noiseShape</span> <span class="param-type">(number[])</span> <span class="param-docs">An array of numbers of type int32, representing the shape for randomly generated keep/drop flags. If the noiseShape has null value, it will be automatically replaced with the x's relative dimension size. Optional.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(number|string)</span> <span class="param-docs">Used to create random seeds. 
Optional.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Signal" href="#Operations-Signal" class="symbol-link"> Operations / Signal </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="signal.frame" href="#signal.frame"> tf.signal.frame</a> <span class="signature">(signal, frameLength, frameStep, padEnd?, padValue?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/signal/frame.ts#L42-L68" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Expands input into frames of frameLength. Slides a window size with frameStep.</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-property">signal</span>.<span class="hljs-title function_">frame</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], <span class="hljs-number">2</span>, <span class="hljs-number">1</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">signal</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>)</span> <span class="param-docs">The input tensor to be expanded</span> </li> <li class="parameter"> <span class="param-name">frameLength</span> <span class="param-type">(number)</span> <span class="param-docs">Length of each frame</span> </li> <li class="parameter"> <span class="param-name">frameStep</span> <span class="param-type">(number)</span> <span class="param-docs">The frame hop size in samples.</span> </li> <li class="parameter"> <span class="param-name">padEnd</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to pad the end of signal with padValue.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">padValue</span> <span class="param-type">(number)</span> <span class="param-docs">A number to use where the input signal does not exist when padEnd is True.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="signal.hammingWindow" href="#signal.hammingWindow"> tf.signal.hammingWindow</a> <span class="signature">(windowLength)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/signal/hamming_window.ts#L34-L36" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Generate a hamming window.</p> <p>See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-property">signal</span>.<span class="hljs-title function_">hammingWindow</span>(<span class="hljs-number">10</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li 
class="parameter"> <span class="param-name">windowLength</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="signal.hannWindow" href="#signal.hannWindow"> tf.signal.hannWindow</a> <span class="signature">(windowLength)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/signal/hann_window.ts#L34-L36" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Generate a Hann window.</p> <p>See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows</p> <pre class="hljs"><code class="hljs language-js">tf.<span class="hljs-property">signal</span>.<span class="hljs-title function_">hannWindow</span>(<span class="hljs-number">10</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">windowLength</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="signal.stft" href="#signal.stft"> tf.signal.stft</a> <span class="signature">(signal, frameLength, frameStep, fftLength?, windowFn?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/signal/stft.ts#L43-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the Short-time Fourier Transform of signals See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> input = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]) tf.<span class="hljs-property">signal</span>.<span class="hljs-title function_">stft</span>(input, <span class="hljs-number">3</span>, <span class="hljs-number">1</span>).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">signal</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>)</span> <span class="param-docs">1-dimensional real value tensor.</span> </li> <li class="parameter"> <span class="param-name">frameLength</span> <span class="param-type">(number)</span> <span class="param-docs">The window length of samples.</span> </li> <li class="parameter"> <span class="param-name">frameStep</span> <span class="param-type">(number)</span> <span class="param-docs">The number of samples to step.</span> </li> <li class="parameter"> <span class="param-name">fftLength</span> <span class="param-type">(number)</span> <span class="param-docs">The size of the FFT to apply.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span 
class="param-name">windowFn</span> <span class="param-type">((length: number) =&gt; <a href="#class:Tensor">tf.Tensor1D</a>)</span> <span class="param-docs">A callable that takes a window length and returns 1-d tensor.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Linear Algebra" href="#Operations-Linear Algebra" class="symbol-link"> Operations / Linear Algebra </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="linalg.bandPart" href="#linalg.bandPart"> tf.linalg.bandPart</a> <span class="signature">(a, numLower, numUpper)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/linalg/band_part.ts#L76-L139" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Copy a tensor setting everything outside a central band in each innermost matrix to zero.</p> <p>The band part is computed as follows: Assume input has <code>k</code> dimensions <code>[I, J, K, ..., M, N]</code>, then the output is a tensor with the same shape where <code>band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]</code>. The indicator function <code>in_band(m, n) = (num_lower &lt; 0 || (m-n) &lt;= num_lower)</code> <code>&amp;&amp; (num_upper &lt; 0 || (n-m) &lt;= num_upper)</code></p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[ <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>], [-<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [-<span class="hljs-number">2</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [-<span class="hljs-number">3</span>, -<span class="hljs-number">2</span>, -<span class="hljs-number">1</span>, <span class="hljs-number">0</span>]]); <span class="hljs-keyword">let</span> y = tf.<span class="hljs-property">linalg</span>.<span class="hljs-title function_">bandPart</span>(x, <span class="hljs-number">1</span>, -<span class="hljs-number">1</span>); y.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[ 0, 1, 2, 3],</span> <span class="hljs-comment">// [-1, 0, 1, 2],</span> <span class="hljs-comment">// [ 0, -1, 0, 1],</span> <span class="hljs-comment">// [ 0, 0 , -1, 0]]</span> <span class="hljs-keyword">let</span> z = tf.<span class="hljs-property">linalg</span>.<span class="hljs-title function_">bandPart</span>(x, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>); z.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[ 0, 1, 0, 0],</span> <span class="hljs-comment">// [-1, 0, 1, 0],</span> <span class="hljs-comment">// [-2, -1, 0, 1],</span> <span class="hljs-comment">// [ 0, -2, -1, 0]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">a</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" 
rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">numLower</span> <span class="param-type">(number|<a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">Number of subdiagonals to keep. If negative, keep entire lower triangle.</span> </li> <li class="parameter"> <span class="param-name">numUpper</span> <span class="param-type">(number|<a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">Number of subdiagonals to keep. If negative, keep entire upper triangle.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="linalg.gramSchmidt" href="#linalg.gramSchmidt"> tf.linalg.gramSchmidt</a> <span class="signature">(xs)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/linalg/gram_schmidt.ts#L60-L107" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Gram-Schmidt orthogonalization.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); <span class="hljs-keyword">let</span> y = tf.<span class="hljs-property">linalg</span>.<span class="hljs-title function_">gramSchmidt</span>(x); y.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Orthogonalized:&#x27;</span>); y.<span class="hljs-title function_">dot</span>(y.<span class="hljs-title function_">transpose</span>()).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// should be nearly the identity matrix.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;First row direction maintained:&#x27;</span>); <span class="hljs-keyword">const</span> data = <span class="hljs-keyword">await</span> y.<span class="hljs-title function_">array</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(data[<span class="hljs-number">0</span>][<span class="hljs-number">1</span>] / data[<span class="hljs-number">0</span>][<span class="hljs-number">0</span>]); <span class="hljs-comment">// should be nearly 2.</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">xs</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>[]|<a href="#class:Tensor">tf.Tensor2D</a>)</span> <span class="param-docs">The vectors to be orthogonalized, in one of the two following formats:</p> <ul> <li>An Array of <a href="#class:Tensor">tf.Tensor1D</a>.</li> <li>A <a href="#class:Tensor">tf.Tensor2D</a>, i.e., a matrix, in which case the vectors are the rows of <code>xs</code>. 
In each case, all the vectors must have the same length and the length must be greater than or equal to the number of vectors.</li> </ul> </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor1D</a>[]|<a href="#class:Tensor">tf.Tensor2D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="linalg.qr" href="#linalg.qr"> tf.linalg.qr</a> <span class="signature">(x, fullMatrices?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/linalg/qr.ts#L84-L118" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Compute QR decomposition of m-by-n matrix using Householder transformation.</p> <p>Implementation based on [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf] (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf)</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); <span class="hljs-keyword">let</span> [q, r] = tf.<span class="hljs-property">linalg</span>.<span class="hljs-title function_">qr</span>(a); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Q&#x27;</span>); q.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;R&#x27;</span>); r.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Orthogonalized&#x27;</span>); q.<span class="hljs-title function_">dot</span>(q.<span class="hljs-title function_">transpose</span>()).<span class="hljs-title function_">print</span>() <span class="hljs-comment">// should be nearly the identity matrix.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Reconstructed&#x27;</span>); q.<span class="hljs-title function_">dot</span>(r).<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// should be nearly [[1, 2], [3, 4]];</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">x</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The <a href="#class:Tensor">tf.Tensor</a> to be QR-decomposed. Must have rank &gt;= 2. Suppose it has the shape <code>[..., M, N]</code>.</span> </li> <li class="parameter"> <span class="param-name">fullMatrices</span> <span class="param-type">(boolean)</span> <span class="param-docs">An optional boolean parameter. Defaults to <code>false</code>. If <code>true</code>, compute full-sized <code>Q</code>. 
If <code>false</code> (the default), compute only the leading N columns of <code>Q</code> and <code>R</code>.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">[<a href="#class:Tensor">tf.Tensor</a>, <a href="#class:Tensor">tf.Tensor</a>]</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-Sparse" href="#Operations-Sparse" class="symbol-link"> Operations / Sparse </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sparseFillEmptyRows" href="#sparseFillEmptyRows"> tf.sparseFillEmptyRows</a> <span class="signature">(indices, values, denseShape, defaultValue)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sparse/sparse_fill_empty_rows.ts#L82-L125" target=_blank>Source</a> </span> </div> <div class="documentation"><p>The input SparseTensor is represented via the map of inputs {<code>indices</code>, <code>values</code>, <code>denseShape</code>}. The output SparseTensor has the same <code>denseShape</code> but with indices <code>outputIndices</code> and values <code>outputValues</code>. This op inserts a single entry for every row that doesn't have any values. The index is created as <code>[row, 0, ..., 0]</code> and the inserted value is <code>defaultValue</code>.</p> <p>For example, suppose <code>spInput</code> has shape [5, 6] and non-empty values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d</p> <p>Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values: [0, 1]: a [0, 3]: b [1, 0]: <code>defaultValue</code> [2, 0]: c [3, 1]: d [4, 0]: <code>defaultValue</code></p> <p>The output SparseTensor will be in row-major order and will have the same shape as the input.</p> <p>This op also returns an indicator vector shaped [dense_shape[0]] such that emptyRowIndicator[i] = True iff row i was an empty row.</p> <p>And a reverse index map vector shaped [indices.shape[0]] that is used during backpropagation, reverseIndexMap[i] = outi s.t. 
indices[i, j] == outputIndices[outi, j] for all j</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseFillEmptyRows</span>( [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">3</span>]], [<span class="hljs-number">0</span>, <span class="hljs-number">10</span>, <span class="hljs-number">13</span>, <span class="hljs-number">14</span>, <span class="hljs-number">32</span>, <span class="hljs-number">33</span>], [<span class="hljs-number">5</span>, <span class="hljs-number">6</span>], -<span class="hljs-number">1</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(result); result[<span class="hljs-string">&#x27;outputIndices&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[0, 0], [1, 0], [1, 3], [1, 4],</span> <span class="hljs-comment">// [2, 0], [3, 2], [3, 3], [4, 0]]</span> result[<span class="hljs-string">&#x27;outputValues&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 10, 13, 14,-1, 32, 33, -1]</span> result[<span class="hljs-string">&#x27;emptyRowIndicator&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [false, false, true, false, true]</span> result[<span class="hljs-string">&#x27;reverseIndexMap&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 1, 2, 3, 5, 6]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 2-D. The indices of the sparse tensor.</span> </li> <li class="parameter"> <span class="param-name">values</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 1-D. The values of the sparse tensor.</span> </li> <li class="parameter"> <span class="param-name">denseShape</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 1-D. The shape of the sparse tensor.</span> </li> <li class="parameter"> <span class="param-name">defaultValue</span> <span class="param-type">(<a href="#class:Tensor">tf.Scalar</a>|ScalarLike)</span> <span class="param-docs">: 0-D. 
Default value to insert into location [row, 0, ..., 0] for rows missing from the input sparse tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sparseReshape" href="#sparseReshape"> tf.sparseReshape</a> <span class="signature">(inputIndices, inputShape, newShape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sparse/sparse_reshape.ts#L60-L90" target=_blank>Source</a> </span> </div> <div class="documentation"><p>This operation has the same semantics as reshape on the represented dense tensor. The <code>inputIndices</code> are recomputed based on the requested <code>newShape</code>. If one component of <code>newShape</code> is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of <code>newShape</code> can be -1. The number of dense elements implied by <code>newShape</code> must be the same as the number of dense elements originally implied by <code>inputShape</code>. Reshaping does not affect the order of values in the SparseTensor. If the input tensor has rank R_in and N non-empty values, and <code>newShape</code> has length R_out, then <code>inputIndices</code> has shape [N, R_in], <code>inputShape</code> has length R_in, <code>outputIndices</code> has shape [N, R_out], and <code>outputShape</code> has length R_out.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseReshape</span>( [[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">6</span>], [<span class="hljs-number">9</span>, -<span class="hljs-number">1</span>]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(result); result[<span class="hljs-string">&#x27;outputIndices&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">//[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]</span> result[<span class="hljs-string">&#x27;outputShape&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [9, 4]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">inputIndices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 2-D. 
N x R_in matrix with the indices of non-empty values in a SparseTensor.</span> </li> <li class="parameter"> <span class="param-name">inputShape</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 1-D. R_in Tensor1D with the input SparseTensor's dense shape.</span> </li> <li class="parameter"> <span class="param-name">newShape</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 1-D. R_out Tensor1D with the requested new dense shape.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sparseSegmentMean" href="#sparseSegmentMean"> tf.sparseSegmentMean</a> <span class="signature">(data, indices, segmentIds)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sparse/sparse_segment_mean.ts#L59-L88" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the mean along sparse segments of a tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>,<span class="hljs-number">4</span>], [-<span class="hljs-number">1</span>,-<span class="hljs-number">2</span>,-<span class="hljs-number">3</span>,-<span class="hljs-number">4</span>], [<span class="hljs-number">6</span>,<span class="hljs-number">7</span>,<span class="hljs-number">8</span>,<span class="hljs-number">9</span>]]); <span class="hljs-comment">// Select two rows, one segment.</span> <span class="hljs-keyword">const</span> result1 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentMean</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result1.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[0, 0, 0, 0]]</span> <span class="hljs-comment">// Select two rows, two segments.</span> <span class="hljs-keyword">const</span> result2 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentMean</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result2.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[1, 2, 3, 4], 
[-1, -2, -3, -4]]</span> <span class="hljs-comment">// Select all rows, two segments.</span> <span class="hljs-keyword">const</span> result3 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentMean</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result3.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5]]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">data</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor of at least one dimension with data that will be assembled in the output.</span> </li> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A 1-D Tensor with indices into data. Has same rank as segmentIds.</span> </li> <li class="parameter"> <span class="param-name">segmentIds</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A 1-D Tensor with indices into the output Tensor. 
Values should be sorted and can be repeated.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="sparseSegmentSum" href="#sparseSegmentSum"> tf.sparseSegmentSum</a> <span class="signature">(data, indices, segmentIds)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/sparse/sparse_segment_sum.ts#L59-L88" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the sum along sparse segments of a tensor.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>,<span class="hljs-number">4</span>], [-<span class="hljs-number">1</span>,-<span class="hljs-number">2</span>,-<span class="hljs-number">3</span>,-<span class="hljs-number">4</span>], [<span class="hljs-number">5</span>,<span class="hljs-number">6</span>,<span class="hljs-number">7</span>,<span class="hljs-number">8</span>]]); <span class="hljs-comment">// Select two rows, one segment.</span> <span class="hljs-keyword">const</span> result1 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentSum</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result1.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[0, 0, 0, 0]]</span> <span class="hljs-comment">// Select two rows, two segments.</span> <span class="hljs-keyword">const</span> result2 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentSum</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result2.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[1, 2, 3, 4], [-1, -2, -3, -4]]</span> <span class="hljs-comment">// Select all rows, two segments.</span> <span class="hljs-keyword">const</span> result3 = tf.<span class="hljs-property">sparse</span>.<span class="hljs-title function_">sparseSegmentSum</span>(c, tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], <span class="hljs-string">&#x27;int32&#x27;</span>)); result3.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[0, 0, 0, 0], [5, 6, 7, 8]]</span> </code></pre> </div> <div 
class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">data</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor of at least one dimension with data that will be assembled in the output.</span> </li> <li class="parameter"> <span class="param-name">indices</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A 1-D Tensor with indices into data. Has same rank as segmentIds.</span> </li> <li class="parameter"> <span class="param-name">segmentIds</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A 1-D Tensor with indices into the output Tensor. Values should be sorted and can be repeated.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Operations-String" href="#Operations-String" class="symbol-link"> Operations / String </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="staticRegexReplace" href="#staticRegexReplace"> tf.staticRegexReplace</a> <span class="signature">(input, pattern, rewrite, replaceGlobal?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/string/static_regex_replace.ts#L45-L54" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Replace the match of a <code>pattern</code> in <code>input</code> with <code>rewrite</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">string</span>.<span class="hljs-title function_">staticRegexReplace</span>( [<span class="hljs-string">&#x27;format this spacing better&#x27;</span>], <span class="hljs-string">&#x27; +&#x27;</span>, <span class="hljs-string">&#x27; &#x27;</span>); result.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [&#x27;format this spacing better&#x27;]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a> | <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: A Tensor of type string. The text to be processed.</span> </li> <li class="parameter"> <span class="param-name">pattern</span> <span class="param-type">(string)</span> <span class="param-docs">: A string. 
The regular expression to match the input.</span> </li> <li class="parameter"> <span class="param-name">rewrite</span> <span class="param-type">(string)</span> <span class="param-docs">: A string. The rewrite to be applied to the matched expression.</span> </li> <li class="parameter"> <span class="param-name">replaceGlobal</span> <span class="param-type">(boolean)</span> <span class="param-docs">: An optional bool. Defaults to True. If True, the replacement is global, otherwise the replacement is done only on the first match.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="stringNGrams" href="#stringNGrams"> tf.stringNGrams</a> <span class="signature">(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/string/string_n_grams.ts#L67-L97" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates ngrams from ragged string data.</p> <p>This op accepts a ragged tensor with 1 ragged dimension containing only strings and outputs a ragged tensor with 1 ragged dimension containing ngrams of that string, joined along the innermost axis.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">string</span>.<span class="hljs-title function_">stringNGrams</span>( [<span class="hljs-string">&#x27;a&#x27;</span>, <span class="hljs-string">&#x27;b&#x27;</span>, <span class="hljs-string">&#x27;c&#x27;</span>, <span class="hljs-string">&#x27;d&#x27;</span>], tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">2</span>, <span class="hljs-number">4</span>], <span class="hljs-string">&#x27;int32&#x27;</span>), <span class="hljs-string">&#x27;|&#x27;</span>, [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], <span class="hljs-string">&#x27;LP&#x27;</span>, <span class="hljs-string">&#x27;RP&#x27;</span>, -<span class="hljs-number">1</span>, <span class="hljs-literal">false</span>); result[<span class="hljs-string">&#x27;nGrams&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [&#x27;a&#x27;, &#x27;b&#x27;, &#x27;LP|a&#x27;, &#x27;a|b&#x27;, &#x27;b|RP&#x27;,</span> <span class="hljs-comment">// &#x27;c&#x27;, &#x27;d&#x27;, &#x27;LP|c&#x27;, &#x27;c|d&#x27;, &#x27;d|RP&#x27;]</span> result[<span class="hljs-string">&#x27;nGramsSplits&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 5, 10]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">data</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: The values tensor of the ragged string tensor to make ngrams out of. 
Must be a 1D string tensor.</span> </li> <li class="parameter"> <span class="param-name">dataSplits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: The splits tensor of the ragged string tensor to make ngrams out of.</span> </li> <li class="parameter"> <span class="param-name">separator</span> <span class="param-type">(string)</span> <span class="param-docs">: The string to append between elements of the token. Use &quot;&quot; for no separator.</span> </li> <li class="parameter"> <span class="param-name">nGramWidths</span> <span class="param-type">(number[])</span> <span class="param-docs">: The sizes of the ngrams to create.</span> </li> <li class="parameter"> <span class="param-name">leftPad</span> <span class="param-type">(string)</span> <span class="param-docs">: The string to use to pad the left side of the ngram sequence. Only used if pad_width !== 0.</span> </li> <li class="parameter"> <span class="param-name">rightPad</span> <span class="param-type">(string)</span> <span class="param-docs">: The string to use to pad the right side of the ngram sequence. Only used if pad_width !== 0.</span> </li> <li class="parameter"> <span class="param-name">padWidth</span> <span class="param-type">(number)</span> <span class="param-docs">: The number of padding elements to add to each side of each sequence. Note that padding will never be greater than <code>nGramWidths</code>-1 regardless of this value. If <code>padWidth</code>=-1, then add max(<code>nGramWidths</code>)-1 elements.</span> </li> <li class="parameter"> <span class="param-name">preserveShortSequences</span> <span class="param-type">(boolean)</span> <span class="param-docs">: If true, then ensure that at least one ngram is generated for each input sequence. In particular, if an input sequence is shorter than min(ngramWidth) + 2*padWidth, then generate a single ngram containing the entire sequence. If false, then no ngrams are generated for these short input sequences.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="stringSplit" href="#stringSplit"> tf.stringSplit</a> <span class="signature">(input, delimiter, skipEmpty?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/string/string_split.ts#L58-L79" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Split elements of <code>input</code> based on <code>delimiter</code> into a SparseTensor .</p> <p>Let N be the size of source (typically N will be the batch size). Split each element of <code>input</code> based on <code>delimiter</code> and return a SparseTensor containing the splitted tokens. Empty tokens are ignored if <code>skipEmpty</code> is set to True.</p> <p><code>delimiter</code> can be empty, or a string of split characters. If <code>delimiter</code> is an empty string, each element of <code>input</code> is split into individual character strings. 
Otherwise every character of <code>delimiter</code> is a potential split point.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">string</span>.<span class="hljs-title function_">stringSplit</span>([<span class="hljs-string">&#x27;hello world&#x27;</span>, <span class="hljs-string">&#x27;a b c&#x27;</span>], <span class="hljs-string">&#x27; &#x27;</span>); result[<span class="hljs-string">&#x27;indices&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]</span> result[<span class="hljs-string">&#x27;values&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [&#x27;hello&#x27;, &#x27;world&#x27;, &#x27;a&#x27;, &#x27;b&#x27;, &#x27;c&#x27;]</span> result[<span class="hljs-string">&#x27;shape&#x27;</span>].<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [2, 3]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor1D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: 1-D. Strings to split.</span> </li> <li class="parameter"> <span class="param-name">delimiter</span> <span class="param-type">(<a href="#class:Tensor">tf.Scalar</a>|ScalarLike)</span> <span class="param-docs">: 0-D. Delimiter characters, or empty string.</span> </li> <li class="parameter"> <span class="param-name">skipEmpty</span> <span class="param-type">(boolean)</span> <span class="param-docs">: Optional. If true, skip the empty strings from the result. Defaults to true.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{[name: string]: <a href="#class:Tensor">tf.Tensor</a>}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="stringToHashBucketFast" href="#stringToHashBucketFast"> tf.stringToHashBucketFast</a> <span class="signature">(input, numBuckets)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/string/string_to_hash_bucket_fast.ts#L46-L58" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Converts each string in the input Tensor to its hash mod by a number of buckets.</p> <p>The hash function is deterministic on the content of the string within the process and will never change. However, it is not suitable for cryptography. This function may be used when CPU time is scarce and inputs are trusted or unimportant. 
There is a risk of adversaries constructing inputs that all hash to the same bucket.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> result = tf.<span class="hljs-property">string</span>.<span class="hljs-title function_">stringToHashBucketFast</span>( [<span class="hljs-string">&#x27;Hello&#x27;</span>, <span class="hljs-string">&#x27;TensorFlow&#x27;</span>, <span class="hljs-string">&#x27;2.x&#x27;</span>], <span class="hljs-number">3</span>); result.<span class="hljs-title function_">print</span>(); <span class="hljs-comment">// [0, 2, 2]</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">input</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">: The strings to assign a hash bucket.</span> </li> <li class="parameter"> <span class="param-name">numBuckets</span> <span class="param-type">(number)</span> <span class="param-docs">: The number of buckets.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Training" href="#Training" class="symbol-link">Training</a> </div> <div class="description"> <p>We also provide an API to perform training and compute gradients. We compute gradients eagerly: users provide a function that is a combination of operations, and we automatically differentiate that function's output with respect to its inputs.</p> <p>For those familiar with TensorFlow, the API we expose exactly mirrors the TensorFlow Eager API.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Training-Gradients" href="#Training-Gradients" class="symbol-link"> Training / Gradients </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="grad" href="#grad"> tf.grad</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L59-L80" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Provided <code>f(x)</code>, returns another function <code>g(x, dy?)</code>, which gives the gradient of <code>f(x)</code> with respect to <code>x</code>.</p> <p>If <code>dy</code> is provided, the gradient of <code>f(x).mul(dy).sum()</code> with respect to <code>x</code> is computed instead. <code>f(x)</code> must take a single tensor <code>x</code> and return a single tensor <code>y</code>. 
If <code>f()</code> takes multiple inputs, use <a href="#grads">tf.grads()</a> instead.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// f(x) = x ^ 2</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = x =&gt; x.<span class="hljs-title function_">square</span>(); <span class="hljs-comment">// f&#x27;(x) = 2x</span> <span class="hljs-keyword">const</span> g = tf.<span class="hljs-title function_">grad</span>(f); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-title function_">g</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// f(x) = x ^ 3</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = x =&gt; x.<span class="hljs-title function_">pow</span>(tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">3</span>, <span class="hljs-string">&#x27;int32&#x27;</span>)); <span class="hljs-comment">// f&#x27;(x) = 3x ^ 2</span> <span class="hljs-keyword">const</span> g = tf.<span class="hljs-title function_">grad</span>(f); <span class="hljs-comment">// f&#x27;&#x27;(x) = 6x</span> <span class="hljs-keyword">const</span> gg = tf.<span class="hljs-title function_">grad</span>(g); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-title function_">gg</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((x: <a href="#class:Tensor">tf.Tensor</a>) =&gt; <a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The function f(x), to compute gradient for.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">( x: <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array|<a href="#class:Tensor">tf.Tensor</a>, dy?: <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array|<a href="#class:Tensor">tf.Tensor</a>) =&gt; <a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="grads" href="#grads"> tf.grads</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L111-L137" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Provided <code>f(x1, x2,...)</code>, returns another function <code>g([x1, x2,...], dy?)</code>, which gives an array of gradients of <code>f()</code> with respect to each input [<code>x1</code>,<code>x2</code>,...].</p> <p>If <code>dy</code> is passed when calling <code>g()</code>, the gradient of <code>f(x1,...).mul(dy).sum()</code> with respect to each input is computed instead. The provided <code>f</code> must take one or more tensors and return a single tensor <code>y</code>. 
If <code>f()</code> takes a single input, we recommend using <a href="#grad">tf.grad()</a> instead.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// f(a, b) = a * b</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = (<span class="hljs-params">a, b</span>) =&gt; a.<span class="hljs-title function_">mul</span>(b); <span class="hljs-comment">// df / da = b, df / db = a</span> <span class="hljs-keyword">const</span> g = tf.<span class="hljs-title function_">grads</span>(f); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> [da, db] = <span class="hljs-title function_">g</span>([a, b]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;da&#x27;</span>); da.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;db&#x27;</span>); db.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((...args: <a href="#class:Tensor">tf.Tensor</a>[]) =&gt; <a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The function <code>f(x1, x2,...)</code> to compute gradients for.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">( args: Array, dy?: <a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array) =&gt; <a href="#class:Tensor">tf.Tensor</a>[]</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="customGrad" href="#customGrad"> tf.customGrad</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L374-L377" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Overrides the gradient computation of a function <code>f</code>.</p> <p>Takes a function <code>f(...inputs, save) =&gt; {value: Tensor, gradFunc: (dy, saved) =&gt; Tensor[]}</code> and returns another function <code>g(...inputs)</code> which takes the same inputs as <code>f</code>. When called, <code>g</code> returns <code>f().value</code>. In backward mode, custom gradients with respect to each input of <code>f</code> are computed using <code>f().gradFunc</code>.</p> <p>The <code>save</code> function passed to <code>f</code> should be used for saving tensors needed in the gradient. 
The <code>saved</code> argument passed to the <code>gradFunc</code> contains those saved tensors.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> customOp = tf.<span class="hljs-title function_">customGrad</span>(<span class="hljs-function">(<span class="hljs-params">x, save</span>) =&gt;</span> { <span class="hljs-comment">// Save x to make sure it&#x27;s available later for the gradient.</span> <span class="hljs-title function_">save</span>([x]); <span class="hljs-comment">// Override gradient of our custom x ^ 2 op to be dy * abs(x);</span> <span class="hljs-keyword">return</span> { <span class="hljs-attr">value</span>: x.<span class="hljs-title function_">square</span>(), <span class="hljs-comment">// Note `saved[0]`, which points to the `x` we saved earlier.</span> <span class="hljs-attr">gradFunc</span>: <span class="hljs-function">(<span class="hljs-params">dy, saved</span>) =&gt;</span> [dy.<span class="hljs-title function_">mul</span>(saved[<span class="hljs-number">0</span>].<span class="hljs-title function_">abs</span>())] }; }); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">1</span>, -<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> dx = tf.<span class="hljs-title function_">grad</span>(<span class="hljs-function"><span class="hljs-params">x</span> =&gt;</span> <span class="hljs-title function_">customOp</span>(x)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`f(x):`</span>); <span class="hljs-title function_">customOp</span>(x).<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`f&#x27;(x):`</span>); <span class="hljs-title function_">dx</span>(x).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((a: <a href="#class:Tensor">tf.Tensor</a>, b: <a href="#class:Tensor">tf.Tensor</a>,..., save?: Function) =&gt; { value: <a href="#class:Tensor">tf.Tensor</a>, gradFunc: (dy: <a href="#class:Tensor">tf.Tensor</a>, saved?: NamedTensorMap) =&gt; <a href="#class:Tensor">tf.Tensor</a> | <a href="#class:Tensor">tf.Tensor</a>[] })</span> <span class="param-docs">The function to evaluate in forward mode, which should return <code>{value: Tensor, gradFunc: (dy, saved) =&gt; Tensor[]}</code>, where <code>gradFunc</code> returns the custom gradients of <code>f</code> with respect to its inputs.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">(...args: <a href="#class:Tensor">tf.Tensor</a>[]) =&gt; <a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="valueAndGrad" href="#valueAndGrad"> tf.valueAndGrad</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a 
href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L164-L183" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Like <a href="#grad">tf.grad()</a>, but also returns the value of <code>f()</code>. Useful when <code>f()</code> returns a metric you want to show.</p> <p>The result is a rich object with the following properties:</p> <ul> <li>grad: The gradient of <code>f(x)</code> w.r.t. <code>x</code> (result of <a href="#grad">tf.grad()</a>).</li> <li>value: The value returned by <code>f(x)</code>.</li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// f(x) = x ^ 2</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = x =&gt; x.<span class="hljs-title function_">square</span>(); <span class="hljs-comment">// f&#x27;(x) = 2x</span> <span class="hljs-keyword">const</span> g = tf.<span class="hljs-title function_">valueAndGrad</span>(f); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> {value, grad} = <span class="hljs-title function_">g</span>(x); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;value&#x27;</span>); value.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;grad&#x27;</span>); grad.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((x: <a href="#class:Tensor">tf.Tensor</a>) =&gt; <a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">( x: <a href="#class:Tensor">tf.Tensor</a>, dy?: <a href="#class:Tensor">tf.Tensor</a>) =&gt; { value: <a href="#class:Tensor">tf.Tensor</a>; grad: <a href="#class:Tensor">tf.Tensor</a>; }</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="valueAndGrads" href="#valueAndGrads"> tf.valueAndGrads</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L216-L242" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Like <a href="#grads">tf.grads()</a>, but returns also the value of <code>f()</code>. Useful when <code>f()</code> returns a metric you want to show.</p> <p>The result is a rich object with the following properties:</p> <ul> <li>grads: The gradients of <code>f()</code> w.r.t. 
each input (result of <a href="#grads">tf.grads()</a>).</li> <li>value: The value returned by <code>f(x)</code>.</li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// f(a, b) = a * b</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = (<span class="hljs-params">a, b</span>) =&gt; a.<span class="hljs-title function_">mul</span>(b); <span class="hljs-comment">// df/da = b, df/db = a</span> <span class="hljs-keyword">const</span> g = tf.<span class="hljs-title function_">valueAndGrads</span>(f); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">tensor1d</span>([-<span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> {value, grads} = <span class="hljs-title function_">g</span>([a, b]); <span class="hljs-keyword">const</span> [da, db] = grads; <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;value&#x27;</span>); value.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;da&#x27;</span>); da.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;db&#x27;</span>); db.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((...args: <a href="#class:Tensor">tf.Tensor</a>[]) =&gt; <a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">( args: <a href="#class:Tensor">tf.Tensor</a>[], dy?: <a href="#class:Tensor">tf.Tensor</a>) =&gt; { grads: <a href="#class:Tensor">tf.Tensor</a>[]; value: <a href="#class:Tensor">tf.Tensor</a>; }</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="variableGrads" href="#variableGrads"> tf.variableGrads</a> <span class="signature">(f, varList?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/gradients.ts#L274-L332" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes and returns the gradient of f(x) with respect to the list of trainable variables provided by <code>varList</code>. 
If no list is provided, it defaults to all trainable variables.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">variable</span>(tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">3</span>, <span class="hljs-number">4</span>])); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">variable</span>(tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">5</span>, <span class="hljs-number">6</span>])); <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>]); <span class="hljs-comment">// f(a, b) = a * x ^ 2 + b * x</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = (<span class="hljs-params"></span>) =&gt; a.<span class="hljs-title function_">mul</span>(x.<span class="hljs-title function_">square</span>()).<span class="hljs-title function_">add</span>(b.<span class="hljs-title function_">mul</span>(x)).<span class="hljs-title function_">sum</span>(); <span class="hljs-comment">// df/da = x ^ 2, df/db = x</span> <span class="hljs-keyword">const</span> {value, grads} = tf.<span class="hljs-title function_">variableGrads</span>(f); <span class="hljs-title class_">Object</span>.<span class="hljs-title function_">keys</span>(grads).<span class="hljs-title function_">forEach</span>(<span class="hljs-function"><span class="hljs-params">varName</span> =&gt;</span> grads[varName].<span class="hljs-title function_">print</span>()); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">(() =&gt; <a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">The function to execute. f() should return a scalar.</span> </li> <li class="parameter"> <span class="param-name">varList</span> <span class="param-type">(<a href="#class:Variable">tf.Variable</a>[])</span> <span class="param-docs">The list of variables to compute the gradients with respect to. 
Defaults to all trainable variables.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{value: <a href="#class:Tensor">tf.Scalar</a>, grads: {[name: string]: <a href="#class:Tensor">tf.Tensor</a>}}</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Training-Optimizers" href="#Training-Optimizers" class="symbol-link"> Training / Optimizers </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.sgd" href="#train.sgd"> tf.train.sgd</a> <span class="signature">(learningRate)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L64-L66" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <a href="#class:train.Optimizer">tf.SGDOptimizer</a> that uses stochastic gradient descent.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// Fit a quadratic function by learning the coefficients a, b, c.</span> <span class="hljs-keyword">const</span> xs = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> ys = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1.1</span>, <span class="hljs-number">5.9</span>, <span class="hljs-number">16.8</span>, <span class="hljs-number">33.9</span>]); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-title class_">Math</span>.<span class="hljs-title function_">random</span>()).<span class="hljs-title function_">variable</span>(); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-title class_">Math</span>.<span class="hljs-title function_">random</span>()).<span class="hljs-title function_">variable</span>(); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-title class_">Math</span>.<span class="hljs-title function_">random</span>()).<span class="hljs-title function_">variable</span>(); <span class="hljs-comment">// y = a * x^2 + b * x + c.</span> <span class="hljs-keyword">const</span> <span class="hljs-title function_">f</span> = x =&gt; a.<span class="hljs-title function_">mul</span>(x.<span class="hljs-title function_">square</span>()).<span class="hljs-title function_">add</span>(b.<span class="hljs-title function_">mul</span>(x)).<span class="hljs-title function_">add</span>(c); <span class="hljs-keyword">const</span> <span class="hljs-title function_">loss</span> = (<span class="hljs-params">pred, label</span>) =&gt; pred.<span class="hljs-title function_">sub</span>(label).<span class="hljs-title function_">square</span>().<span class="hljs-title function_">mean</span>(); <span class="hljs-keyword">const</span> learningRate = <span class="hljs-number">0.01</span>; <span class="hljs-keyword">const</span> optimizer = tf.<span class="hljs-property">train</span>.<span class="hljs-title function_">sgd</span>(learningRate); <span class="hljs-comment">// Train the model.</span> <span class="hljs-keyword">for</span> (<span class="hljs-keyword">let</span> i = <span 
class="hljs-number">0</span>; i &lt; <span class="hljs-number">10</span>; i++) { optimizer.<span class="hljs-title function_">minimize</span>(<span class="hljs-function">() =&gt;</span> <span class="hljs-title function_">loss</span>(<span class="hljs-title function_">f</span>(xs), ys)); } <span class="hljs-comment">// Make predictions.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>( <span class="hljs-string">`a: <span class="hljs-subst">${a.dataSync()}</span>, b: <span class="hljs-subst">${b.dataSync()}</span>, c: <span class="hljs-subst">${c.dataSync()}</span>`</span>); <span class="hljs-keyword">const</span> preds = <span class="hljs-title function_">f</span>(xs).<span class="hljs-title function_">dataSync</span>(); preds.<span class="hljs-title function_">forEach</span>(<span class="hljs-function">(<span class="hljs-params">pred, i</span>) =&gt;</span> { <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`x: <span class="hljs-subst">${i}</span>, pred: <span class="hljs-subst">${pred}</span>`</span>); }); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the SGD algorithm.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:train.Optimizer">tf.SGDOptimizer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.momentum" href="#train.momentum"> tf.train.momentum</a> <span class="signature">(learningRate, momentum, useNesterov?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L83-L86" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <a href="#class:train.Optimizer">tf.MomentumOptimizer</a> that uses momentum gradient descent.</p> <p>See <a target="_blank" rel="noopener" href="http://proceedings.mlr.press/v28/sutskever13.pdf">http://proceedings.mlr.press/v28/sutskever13.pdf</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the Momentum gradient descent algorithm.</span> </li> <li class="parameter"> <span class="param-name">momentum</span> <span class="param-type">(number)</span> <span class="param-docs">The momentum to use for the momentum gradient descent algorithm.</span> </li> <li class="parameter"> <span class="param-name">useNesterov</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:train.Optimizer">tf.MomentumOptimizer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.adagrad" href="#train.adagrad"> tf.train.adagrad</a> <span class="signature">(learningRate, initialAccumulatorValue?)</span> <span class="chip">function</span> <span class="source-link"> <a 
href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L185-L188" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <a href="#class:train.Optimizer">tf.AdagradOptimizer</a> that uses the Adagrad algorithm. See <a target="_blank" rel="noopener" href="http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf">http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf</a> or <a target="_blank" rel="noopener" href="http://ruder.io/optimizing-gradient-descent/index.html#adagrad">http://ruder.io/optimizing-gradient-descent/index.html#adagrad</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the Adagrad gradient descent algorithm.</span> </li> <li class="parameter"> <span class="param-name">initialAccumulatorValue</span> <span class="param-type">(number)</span> <span class="param-docs">Starting value for the accumulators, must be positive.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:train.Optimizer">tf.AdagradOptimizer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.adadelta" href="#train.adadelta"> tf.train.adadelta</a> <span class="signature">(learningRate?, rho?, epsilon?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L145-L148" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <a href="#class:train.Optimizer">tf.AdadeltaOptimizer</a> that uses the Adadelta algorithm. 
See <a target="_blank" rel="noopener" href="https://arxiv.org/abs/1212.5701">https://arxiv.org/abs/1212.5701</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the Adadelta gradient descent algorithm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">rho</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate decay over each update.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A constant epsilon used to better condition the grad update.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:train.Optimizer">tf.AdadeltaOptimizer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.adam" href="#train.adam"> tf.train.adam</a> <span class="signature">(learningRate?, beta1?, beta2?, epsilon?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L127-L131" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <code>tf.AdamOptimizer</code> that uses the Adam algorithm. See <a target="_blank" rel="noopener" href="https://arxiv.org/abs/1412.6980">https://arxiv.org/abs/1412.6980</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the Adam gradient descent algorithm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">beta1</span> <span class="param-type">(number)</span> <span class="param-docs">The exponential decay rate for the 1st moment estimates.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">beta2</span> <span class="param-type">(number)</span> <span class="param-docs">The exponential decay rate for the 2nd moment estimates.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A small constant for numerical stability.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AdamOptimizer</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.adamax" href="#train.adamax"> tf.train.adamax</a> <span class="signature">(learningRate?, beta1?, beta2?, epsilon?, decay?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L163-L167" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <code>tf.AdamaxOptimizer</code> that uses the Adamax algorithm. 
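</p> <p>A minimal, illustrative sketch follows (the hyperparameter values, variable, and target below are arbitrary assumptions for the demo, not recommended settings); the optimizer is used through the same <code>minimize()</code> pattern as the other optimizers:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch only: arbitrary hyperparameters and data.
const optimizer = tf.train.adamax(0.002, 0.9, 0.999);
const w = tf.zeros([2]).variable();
const loss = () =&gt; w.sub(tf.tensor1d([3, -3])).square().sum();
for (let i = 0; i &lt; 100; i++) {
  optimizer.minimize(loss);
}
// w moves toward [3, -3] as training proceeds.
w.print();
</code></pre> <p>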
See <a target="_blank" rel="noopener" href="https://arxiv.org/abs/1412.6980">https://arxiv.org/abs/1412.6980</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the Adamax gradient descent algorithm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">beta1</span> <span class="param-type">(number)</span> <span class="param-docs">The exponential decay rate for the 1st moment estimates.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">beta2</span> <span class="param-type">(number)</span> <span class="param-docs">The exponential decay rate for the 2nd moment estimates.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A small constant for numerical stability.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">decay</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate decay over each update.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">AdamaxOptimizer</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="train.rmsprop" href="#train.rmsprop"> tf.train.rmsprop</a> <span class="signature">(learningRate, decay?, momentum?, epsilon?, centered?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer_constructors.ts#L108-L113" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constructs a <a href="#class:train.Optimizer">tf.RMSPropOptimizer</a> that uses RMSProp gradient descent. 
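</p> <p>A hedged, illustrative sketch (made-up variable, data, and hyperparameters; only <code>learningRate</code> and <code>decay</code> are passed here) showing the usual <code>minimize()</code> loop:</p> <pre class="hljs"><code class="hljs language-js">// Illustrative sketch only.
const b = tf.scalar(0).variable();
const xs = tf.tensor1d([0, 1, 2, 3]);
const ys = tf.tensor1d([1, 2, 3, 4]);
// loss(b) = mean((x + b - y)^2), minimized at b = 1.
const loss = () =&gt; xs.add(b).sub(ys).square().mean();
const optimizer = tf.train.rmsprop(/* learningRate */ 0.1, /* decay */ 0.9);
for (let i = 0; i &lt; 20; i++) {
  optimizer.minimize(loss);
}
// b should approach 1 after training.
console.log(`b: ${b.dataSync()}`);
</code></pre> <p>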
This implementation uses plain momentum and is not the centered version of RMSProp.</p> <p>See <a target="_blank" rel="noopener" href="http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf">http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">learningRate</span> <span class="param-type">(number)</span> <span class="param-docs">The learning rate to use for the RMSProp gradient descent algorithm.</span> </li> <li class="parameter"> <span class="param-name">decay</span> <span class="param-type">(number)</span> <span class="param-docs">The discounting factor for the history/coming gradient.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">momentum</span> <span class="param-type">(number)</span> <span class="param-docs">The momentum to use for the RMSProp gradient descent algorithm.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">Small value to avoid zero denominator.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">centered</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, gradients are normalized by the estimated variance of the gradient.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:train.Optimizer">tf.RMSPropOptimizer</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Training-Losses" href="#Training-Losses" class="symbol-link"> Training / Losses </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.absoluteDifference" href="#losses.absoluteDifference"> tf.losses.absoluteDifference</a> <span class="signature">(labels, predictions, weights?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/absolute_difference.ts#L44-L60" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the absolute difference loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" 
href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.computeWeightedLoss" href="#losses.computeWeightedLoss"> tf.losses.computeWeightedLoss</a> <span class="signature">(losses, weights?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/compute_weighted_loss.ts#L43-L83" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the weighted loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">losses</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor of shape <code>[batch_size, d1, ..., dN]</code>.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>losses</code>, and must be broadcastable to <code>losses</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.cosineDistance" href="#losses.cosineDistance"> tf.losses.cosineDistance</a> <span class="signature">(labels, predictions, axis, weights?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/cosine_distance.ts#L46-L63" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the cosine distance loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span 
class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">The dimension along which the cosine distance is computed.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.hingeLoss" href="#losses.hingeLoss"> tf.losses.hingeLoss</a> <span class="signature">(labels, predictions, weights?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/hinge_loss.ts#L45-L62" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the Hinge loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span 
class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.huberLoss" href="#losses.huberLoss"> tf.losses.huberLoss</a> <span class="signature">(labels, predictions, weights?, delta?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/huber_loss.ts#L50-L70" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the Huber loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">delta</span> <span class="param-type">(number)</span> <span class="param-docs">Point where Huber loss changes from quadratic to linear.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. 
Should be of type <code>Reduction</code>.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.logLoss" href="#losses.logLoss"> tf.losses.logLoss</a> <span class="signature">(labels, predictions, weights?, epsilon?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/log_loss.ts#L49-L69" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the log loss between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">epsilon</span> <span class="param-type">(number)</span> <span class="param-docs">A small increment to avoid taking log of zero</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. 
Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.meanSquaredError" href="#losses.meanSquaredError"> tf.losses.meanSquaredError</a> <span class="signature">(labels, predictions, weights?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/mean_squared_error.ts#L43-L59" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the mean squared error between two tensors.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">labels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor, same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">predictions</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. 
Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.sigmoidCrossEntropy" href="#losses.sigmoidCrossEntropy"> tf.losses.sigmoidCrossEntropy</a> <span class="signature">(multiClassLabels, logits, weights?, labelSmoothing?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/sigmoid_cross_entropy.ts#L93-L119" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the sigmoid cross entropy loss between two tensors.</p> <p>If labelSmoothing is nonzero, smooth the labels towards 1/2:</p> <p>newMulticlassLabels = multiclassLabels * (1 - labelSmoothing) + 0.5 * labelSmoothing</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">multiClassLabels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The ground truth output tensor of shape [batch_size, num_classes], same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">logits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or the same rank as <code>labels</code>, and must be broadcastable to <code>labels</code> (i.e., all dimensions must be either <code>1</code>, or the same as the corresponding <code>losses</code> dimension).</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">labelSmoothing</span> <span class="param-type">(number)</span> <span class="param-docs">If greater than 0, then smooth the labels.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. 
Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="losses.softmaxCrossEntropy" href="#losses.softmaxCrossEntropy"> tf.losses.softmaxCrossEntropy</a> <span class="signature">(onehotLabels, logits, weights?, labelSmoothing?, reduction?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/losses/softmax_cross_entropy.ts#L125-L154" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the softmax cross entropy loss between two tensors.</p> <p>If labelSmoothing is nonzero, smooth the labels towards 1/2:</p> <p>newOnehotLabels = onehotLabels * (1 - labelSmoothing) + labelSmoothing / numClasses</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">onehotLabels</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">One hot encoded labels [batch_size, num_classes], same dimensions as 'predictions'.</span> </li> <li class="parameter"> <span class="param-name">logits</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The predicted outputs.</span> </li> <li class="parameter"> <span class="param-name">weights</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">Tensor whose rank is either 0, or 1, and must be broadcastable to <code>loss</code> of shape [batch_size]</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">labelSmoothing</span> <span class="param-type">(number)</span> <span class="param-docs">If greater than 0, then smooth the labels.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reduction</span> <span class="param-type">(Reduction)</span> <span class="param-docs">Type of reduction to apply to loss. 
Should be of type <code>Reduction</code></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Training-Classes" href="#Training-Classes" class="symbol-link"> Training / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:train.Optimizer" href="#class:train.Optimizer">tf.train.Optimizer</a> <span class="signature"> <span>extends Serializable</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer.ts#L42-L166" target=_blank>Source</a> </span> </div> <div class="documentation"></div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.train.Optimizer.minimize" href="#tf.train.Optimizer.minimize"> minimize</a> <span class="signature">(f, returnCost?, varList?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer.ts#L59-L80" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes <code>f()</code> and minimizes the scalar output of <code>f()</code> by computing gradients of y with respect to the list of trainable variables provided by <code>varList</code>. If no list is provided, it defaults to all trainable variables.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">(() =&gt; <a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">The function to execute and whose output to minimize.</span> </li> <li class="parameter"> <span class="param-name">returnCost</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to return the scalar cost value produced by executing <code>f()</code>.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">varList</span> <span class="param-type">(<a href="#class:Variable">tf.Variable</a>[])</span> <span class="param-docs">An optional list of variables to update. If specified, only the trainable variables in varList will be updated by minimize. Defaults to all trainable variables.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Scalar</a> |null</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.train.Optimizer.computeGradients" href="#tf.train.Optimizer.computeGradients"> computeGradients</a> <span class="signature">(f, varList?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer.ts#L109-L112" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes f() and computes the gradient of the scalar output of f() with respect to the list of trainable variables provided by <code>varList</code>. 
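</p> <p>For example, a minimal sketch (the variable, the loss function, and the learning rate below are purely illustrative):</p> <pre class="hljs"><code class="hljs language-js">// A scalar loss over a single trainable variable.
const x = tf.variable(tf.scalar(3));
const optimizer = tf.train.sgd(0.1);

// f() must return a scalar; here the loss is x^2.
const {value, grads} = optimizer.computeGradients(() =&gt; x.square());

// Update the variable using the computed gradients.
optimizer.applyGradients(grads);
value.print();
</code></pre> <p>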
If no list is provided, it defaults to all trainable variables.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">(() =&gt; <a href="#class:Tensor">tf.Scalar</a>)</span> <span class="param-docs">The function to execute and whose output to use for computing gradients with respect to variables.</span> </li> <li class="parameter"> <span class="param-name">varList</span> <span class="param-type">(<a href="#class:Variable">tf.Variable</a>[])</span> <span class="param-docs">An optional list of variables to compute gradients with respect to. If specified, only the trainable variables in varList will have gradients computed with respect to. Defaults to all trainable variables.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">{value: <a href="#class:Tensor">tf.Scalar</a>, grads: {[name: string]: <a href="#class:Tensor">tf.Tensor</a>}}</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.train.Optimizer.applyGradients" href="#tf.train.Optimizer.applyGradients"> applyGradients</a> <span class="signature">(variableGradients)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/optimizers/optimizer.ts#L121-L122" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Updates variables by using the computed gradients.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">variableGradients</span> <span class="param-type">({[name: string]: <a href="#class:Tensor">tf.Tensor</a>}| NamedTensor[])</span> <span class="param-docs">A mapping of variable name to its gradient value.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Performance" href="#Performance" class="symbol-link">Performance</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Performance-Memory" href="#Performance-Memory" class="symbol-link"> Performance / Memory </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="tidy" href="#tidy"> tf.tidy</a> <span class="signature">(nameOrFn, fn?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L190-L193" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes the provided function <code>fn</code> and after it is executed, cleans up all intermediate tensors allocated by <code>fn</code> except those returned by <code>fn</code>. <code>fn</code> must not return a Promise (async functions not allowed). The returned result can be a complex object.</p> <p>Using this method helps avoid memory leaks. In general, wrap calls to operations in <a href="#tidy">tf.tidy()</a> for automatic memory cleanup.</p> <p>NOTE: Variables do <em>not</em> get cleaned up when inside a tidy(). 
If you want to dispose variables, please use <a href="#disposeVariables">tf.disposeVariables()</a> or call dispose() directly on variables.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-comment">// y = 2 ^ 2 + 1</span> <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tidy</span>(<span class="hljs-function">() =&gt;</span> { <span class="hljs-comment">// a, b, and one will be cleaned up when the tidy ends.</span> <span class="hljs-keyword">const</span> one = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">1</span>); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">2</span>); <span class="hljs-keyword">const</span> b = a.<span class="hljs-title function_">square</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;numTensors (in tidy): &#x27;</span> + tf.<span class="hljs-title function_">memory</span>().<span class="hljs-property">numTensors</span>); <span class="hljs-comment">// The value returned inside the tidy function will return</span> <span class="hljs-comment">// through the tidy, in this case to the variable y.</span> <span class="hljs-keyword">return</span> b.<span class="hljs-title function_">add</span>(one); }); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;numTensors (outside tidy): &#x27;</span> + tf.<span class="hljs-title function_">memory</span>().<span class="hljs-property">numTensors</span>); y.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">nameOrFn</span> <span class="param-type">(string|Function)</span> <span class="param-docs">The name of the closure, or the function to execute. If a name is provided, the 2nd argument should be the function. 
If debug mode is on, the timing and the memory usage of the function will be tracked and displayed on the console using the provided name.</span> </li> <li class="parameter"> <span class="param-name">fn</span> <span class="param-type">(Function)</span> <span class="param-docs">The function to execute.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string}</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="dispose" href="#dispose"> tf.dispose</a> <span class="signature">(container)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L206-L209" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Disposes any <a href="#class:Tensor">tf.Tensor</a>s found within the provided object.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">container</span> <span class="param-type">(void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string})</span> <span class="param-docs">an object that may be a <a href="#class:Tensor">tf.Tensor</a> or may directly contain <a href="#class:Tensor">tf.Tensor</a>s, such as a <code>Tensor[]</code> or <code>{key: Tensor, ...}</code>. If the object is not a <a href="#class:Tensor">tf.Tensor</a> or does not contain <code>Tensors</code>, nothing happens. In general it is safe to pass any object here, except that <code>Promise</code>s are not supported.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="keep" href="#keep"> tf.keep</a> <span class="signature">(result)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L243-L245" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Keeps a <a href="#class:Tensor">tf.Tensor</a> generated inside a <a href="#tidy">tf.tidy()</a> from being disposed automatically.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">let</span> b; <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tidy</span>(<span class="hljs-function">() =&gt;</span> { <span class="hljs-keyword">const</span> one = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">1</span>); <span class="hljs-keyword">const</span> a = tf.<span class="hljs-title function_">scalar</span>(<span class="hljs-number">2</span>); <span class="hljs-comment">// b will not be cleaned up by the tidy. 
a and one will be cleaned up</span> <span class="hljs-comment">// when the tidy ends.</span> b = tf.<span class="hljs-title function_">keep</span>(a.<span class="hljs-title function_">square</span>()); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;numTensors (in tidy): &#x27;</span> + tf.<span class="hljs-title function_">memory</span>().<span class="hljs-property">numTensors</span>); <span class="hljs-comment">// The value returned inside the tidy function will return</span> <span class="hljs-comment">// through the tidy, in this case to the variable y.</span> <span class="hljs-keyword">return</span> b.<span class="hljs-title function_">add</span>(one); }); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;numTensors (outside tidy): &#x27;</span> + tf.<span class="hljs-title function_">memory</span>().<span class="hljs-property">numTensors</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;y:&#x27;</span>); y.<span class="hljs-title function_">print</span>(); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;b:&#x27;</span>); b.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">result</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The tensor to keep from being disposed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="memory" href="#memory"> tf.memory</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L110-L112" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns memory info at the current time in the program. The result is an object with the following properties:</p> <ul> <li><code>numBytes</code>: Number of bytes allocated (undisposed) at this time.</li> <li><code>numTensors</code>: Number of unique tensors allocated.</li> <li><code>numDataBuffers</code>: Number of unique data buffers allocated (undisposed) at this time, which is ≤ the number of tensors (e.g. <code>a.reshape(newShape)</code> makes a new Tensor that shares the same data buffer with <code>a</code>).</li> <li><code>unreliable</code>: True if the memory usage is unreliable. 
See <code>reasons</code> when <code>unreliable</code> is true.</li> <li><code>reasons</code>: <code>string[]</code>, reasons why the memory is unreliable, present if <code>unreliable</code> is true.</li> </ul> <p>WebGL Properties:</p> <ul> <li><code>numBytesInGPU</code>: Number of bytes allocated (undisposed) in the GPU only at this time.</li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">MemoryInfo</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Performance-Timing" href="#Performance-Timing" class="symbol-link"> Performance / Timing </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="time" href="#time"> tf.time</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L272-L274" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes <code>f()</code> and returns a promise that resolves with timing information.</p> <p>The result is an object with the following properties:</p> <ul> <li><code>wallMs</code>: Wall execution time.</li> <li><code>kernelMs</code>: Kernel execution time, ignoring data transfer. If using the WebGL backend and the query timer extension is not available, this will return an error object.</li> <li>On <code>WebGL</code> The following additional properties exist: <ul> <li><code>uploadWaitMs</code>: CPU blocking time on texture uploads.</li> <li><code>downloadWaitMs</code>: CPU blocking time on texture downloads (readPixels).</li> </ul> </li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">randomNormal</span>([<span class="hljs-number">20</span>, <span class="hljs-number">20</span>]); <span class="hljs-keyword">const</span> time = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">time</span>(<span class="hljs-function">() =&gt;</span> x.<span class="hljs-title function_">matMul</span>(x)); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`kernelMs: <span class="hljs-subst">${time.kernelMs}</span>, wallTimeMs: <span class="hljs-subst">${time.wallMs}</span>`</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">(() =&gt; void)</span> <span class="param-docs">The function to execute and time.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;TimingInfo&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="nextFrame" href="#nextFrame"> tf.nextFrame</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/browser_util.ts#L37-L39" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a promise that resolves when a requestAnimationFrame has completed.</p> <p>On Node.js this uses setImmediate instead of requestAnimationFrame.</p> <p>This is simply a sugar method so that users can do the following: <code>await tf.nextFrame();</code></p> </div> <div 
class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;void&gt;</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Performance-Profile" href="#Performance-Profile" class="symbol-link"> Performance / Profile </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="profile" href="#profile"> tf.profile</a> <span class="signature">(f)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L145-L148" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Executes the provided function <code>f()</code> and returns a promise that resolves with information about the function's memory use:</p> <ul> <li><code>newBytes</code>: the number of new bytes allocated</li> <li><code>newTensors</code>: the number of new tensors created</li> <li><code>peakBytes</code>: the peak number of bytes allocated</li> <li><code>kernels</code>: an array of objects for each kernel involved that reports their input and output shapes, number of bytes used, and number of new tensors created.</li> <li><code>kernelNames</code>: an array of unique strings with just the names of the kernels in the <code>kernels</code> array.</li> </ul> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> profile = <span class="hljs-keyword">await</span> tf.<span class="hljs-title function_">profile</span>(<span class="hljs-function">() =&gt;</span> { <span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">let</span> x2 = x.<span class="hljs-title function_">square</span>(); x2.<span class="hljs-title function_">dispose</span>(); x2 = x.<span class="hljs-title function_">square</span>(); x2.<span class="hljs-title function_">dispose</span>(); <span class="hljs-keyword">return</span> x; }); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`newBytes: <span class="hljs-subst">${profile.newBytes}</span>`</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`newTensors: <span class="hljs-subst">${profile.newTensors}</span>`</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">`byte usage over all kernels: <span class="hljs-subst">${profile.kernels.map(k =&gt; k.totalBytesSnapshot)}</span>`</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">(() =&gt; (void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string} | Promise&lt;void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a 
href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string}&gt;))</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;ProfileInfo&gt;</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Environment" href="#Environment" class="symbol-link">Environment</a> </div> <div class="description"> <p>TensorFlow.js can run mathematical operations on different backends. Currently, we support WebGL and JavaScript CPU. By default, we choose the 'best' backend available, but allow users to customize their backend.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Environment-" href="#Environment-" class="symbol-link"> Environment / </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:Environment" href="#class:Environment">tf.Environment</a> <span class="signature"> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/environment.ts#L41-L177" target=_blank>Source</a> </span> </div> <div class="documentation"><p>The environment contains evaluated flags as well as the registered platform. This is always used as a global singleton and can be retrieved with <code>tf.env()</code>.</p> </div> <div class="method-list"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="disposeVariables" href="#disposeVariables"> tf.disposeVariables</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L76-L78" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Dispose all variables kept in backend engine.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="enableDebugMode" href="#enableDebugMode"> tf.enableDebugMode</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L51-L53" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Enables debug mode which will log information about all executed kernels: the elapsed time of the kernel execution, as well as the rank, shape, and size of the output tensor.</p> <p>Debug mode will significantly slow down your application as it will download the result of every operation to the CPU. This should not be used in production. 
Debug mode does not affect the timing information of the kernel execution, as we do not measure download time in the kernel execution time.</p> <p>See also: <a href="#profile">tf.profile()</a>, <a href="#memory">tf.memory()</a>.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="enableProdMode" href="#enableProdMode"> tf.enableProdMode</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L33-L35" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Enables production mode, which disables correctness checks in favor of performance.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="engine" href="#engine"> tf.engine</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L85-L87" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the global engine that keeps track of all tensors and backends.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Engine</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="env" href="#env"> tf.env</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/environment.ts#L212-L214" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the current environment (a global singleton).</p> <p>The environment object contains the evaluated feature values as well as the active platform.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Environment">tf.Environment</a></span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Constraints" href="#Constraints" class="symbol-link">Constraints</a> </div> <div class="description"> <p>Constraints are added to attributes of a Layer (such as weights, kernels, or biases) at construction time to clamp, or otherwise restrict, the allowed range of values for different components of the Layer.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Constraints-Classes" href="#Constraints-Classes" class="symbol-link"> Constraints / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:constraints.Constraint" href="#class:constraints.Constraint">tf.constraints.Constraint</a> <span class="signature"> <span>extends serialization.Serializable</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/constraints.ts#L34-L40" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Base class for functions that impose constraints on weight values.</p> </div> <div class="method-list"> </div> </div> <div class="subheading"> <div class="title"> 
<a name="Constraints-" href="#Constraints-" class="symbol-link"> Constraints / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="constraints.maxNorm" href="#constraints.maxNorm"> tf.constraints.maxNorm</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_constraints.ts#L26-L28" target=_blank>Source</a> </span> </div> <div class="documentation"><p>MaxNorm weight constraint.</p> <p>Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value.</p> <p>References - <a target="_blank" rel="noopener" href="http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf">Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">maxValue</span> <span class="param-type">(number)</span> <span class="param-docs">Maximum norm for incoming weights</span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">Axis along which to calculate norms.</p> <p>For instance, in a <code>Dense</code> layer the weight matrix has shape <code>[inputDim, outputDim]</code>, set <code>axis</code> to <code>0</code> to constrain each weight vector of length <code>[inputDim,]</code>. In a <code>Conv2D</code> layer with <code>dataFormat=&quot;channels_last&quot;</code>, the weight tensor has shape <code>[rows, cols, inputDepth, outputDepth]</code>, set <code>axis</code> to <code>[0, 1, 2]</code> to constrain the weights of each filter tensor of size <code>[rows, cols, inputDepth]</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:constraints.Constraint">tf.constraints.Constraint</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="constraints.minMaxNorm" href="#constraints.minMaxNorm"> tf.constraints.minMaxNorm</a> <span class="signature">(config)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_constraints.ts#L49-L51" target=_blank>Source</a> </span> </div> <div class="documentation"></div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">minValue</span> <span class="param-type">(number)</span> <span class="param-docs">Minimum norm for incoming weights</span> </li> <li class="parameter config-param"> <span class="param-name">maxValue</span> <span class="param-type">(number)</span> <span class="param-docs">Maximum norm for incoming weights</span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">Axis along which to calculate norms. 
For instance, in a <code>Dense</code> layer the weight matrix has shape <code>[inputDim, outputDim]</code>, set <code>axis</code> to <code>0</code> to constrain each weight vector of length <code>[inputDim,]</code>. In a <code>Conv2D</code> layer with <code>dataFormat=&quot;channels_last&quot;</code>, the weight tensor has shape <code>[rows, cols, inputDepth, outputDepth]</code>, set <code>axis</code> to <code>[0, 1, 2]</code> to constrain the weights of each filter tensor of size <code>[rows, cols, inputDepth]</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">rate</span> <span class="param-type">(number)</span> <span class="param-docs">Rate for enforcing the constraint: weights will be rescaled to yield: <code>(1 - rate) * norm + rate * norm.clip(minValue, maxValue)</code>. Effectively, this means that rate=1.0 stands for strict enforcement of the constraint, while rate&lt;1.0 means that weights will be rescaled at each step to slowly move towards a value inside the desired interval.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:constraints.Constraint">tf.constraints.Constraint</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="constraints.nonNeg" href="#constraints.nonNeg"> tf.constraints.nonNeg</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_constraints.ts#L44-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constrains the weight to be non-negative.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:constraints.Constraint">tf.constraints.Constraint</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="constraints.unitNorm" href="#constraints.unitNorm"> tf.constraints.unitNorm</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_constraints.ts#L35-L37" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Constrains the weights incident to each hidden unit to have unit norm.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">axis</span> <span class="param-type">(number)</span> <span class="param-docs">Axis along which to calculate norms.</p> <p>For instance, in a <code>Dense</code> layer the weight matrix has shape <code>[inputDim, outputDim]</code>, set <code>axis</code> to <code>0</code> to constrain each weight vector of length <code>[inputDim,]</code>. 
In a <code>Conv2D</code> layer with <code>dataFormat=&quot;channels_last&quot;</code>, the weight tensor has shape <code>[rows, cols, inputDepth, outputDepth]</code>, set <code>axis</code> to <code>[0, 1, 2]</code> to constrain the weights of each filter tensor of size <code>[rows, cols, inputDepth]</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:constraints.Constraint">tf.constraints.Constraint</a></span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Initializers" href="#Initializers" class="symbol-link">Initializers</a> </div> <div class="description"> <p>Initializers are used in Layers to establish the starting values of weights, biases, kernels, etc.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Initializers-Classes" href="#Initializers-Classes" class="symbol-link"> Initializers / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:initializers.Initializer" href="#class:initializers.Initializer">tf.initializers.Initializer</a> <span class="signature"> <span>extends serialization.Serializable</span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/initializers.ts#L35-L50" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer base class.</p> </div> <div class="method-list"> </div> </div> <div class="subheading"> <div class="title"> <a name="Initializers-" href="#Initializers-" class="symbol-link"> Initializers / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.constant" href="#initializers.constant"> tf.initializers.constant</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L36-L38" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates values initialized to some constant.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">value</span> <span class="param-type">(number)</span> <span class="param-docs">The value for each element in the variable.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.glorotNormal" href="#initializers.glorotNormal"> tf.initializers.glorotNormal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L134-L136" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Glorot normal initializer, also called Xavier normal initializer. 
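</p> <p>A minimal sketch of typical usage as a layer's <code>kernelInitializer</code> (the layer arguments and seed below are purely illustrative):</p> <pre class="hljs"><code class="hljs language-js">const init = tf.initializers.glorotNormal({seed: 42});

// Pass the initializer when constructing a layer; the kernel weights are
// drawn using the initializer when the layer is built.
const layer = tf.layers.dense({units: 4, inputShape: [8], kernelInitializer: init});
layer.apply(tf.zeros([1, 8])).print();
</code></pre> <p>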
It draws samples from a truncated normal distribution centered on 0 with <code>stddev = sqrt(2 / (fan_in + fan_out))</code> where <code>fan_in</code> is the number of input units in the weight tensor and <code>fan_out</code> is the number of output units in the weight tensor.</p> <p>Reference: Glorot &amp; Bengio, AISTATS 2010 http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.glorotUniform" href="#initializers.glorotUniform"> tf.initializers.glorotUniform</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L117-L119" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where <code>limit</code> is <code>sqrt(6 / (fan_in + fan_out))</code> where <code>fan_in</code> is the number of input units in the weight tensor and <code>fan_out</code> is the number of output units in the weight tensor</p> <p>Reference: Glorot &amp; Bengio, AISTATS 2010 http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.heNormal" href="#initializers.heNormal"> tf.initializers.heNormal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L150-L152" target=_blank>Source</a> </span> </div> <div class="documentation"><p>He normal initializer.</p> <p>It draws samples from a truncated normal distribution centered on 0 with <code>stddev = sqrt(2 / fanIn)</code> where <code>fanIn</code> is the number of input units in the weight tensor.</p> <p>Reference: He et al., http://arxiv.org/abs/1502.01852</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span 
class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.heUniform" href="#initializers.heUniform"> tf.initializers.heUniform</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L166-L168" target=_blank>Source</a> </span> </div> <div class="documentation"><p>He uniform initializer.</p> <p>It draws samples from a uniform distribution within [-limit, limit] where <code>limit</code> is <code>sqrt(6 / fan_in)</code> where <code>fanIn</code> is the number of input units in the weight tensor.</p> <p>Reference: He et al., http://arxiv.org/abs/1502.01852</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.identity" href="#initializers.identity"> tf.initializers.identity</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L83-L85" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates the identity matrix. 
Only use for square 2D matrices.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">gain</span> <span class="param-type">(number)</span> <span class="param-docs">Multiplicative factor to apply to the identity matrix.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.leCunNormal" href="#initializers.leCunNormal"> tf.initializers.leCunNormal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L183-L185" target=_blank>Source</a> </span> </div> <div class="documentation"><p>LeCun normal initializer.</p> <p>It draws samples from a truncated normal distribution centered on 0 with <code>stddev = sqrt(1 / fanIn)</code> where <code>fanIn</code> is the number of input units in the weight tensor.</p> <p>References: <a target="_blank" rel="noopener" href="https://arxiv.org/abs/1706.02515">Self-Normalizing Neural Networks</a> <a target="_blank" rel="noopener" href="http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf">Efficient Backprop</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.leCunUniform" href="#initializers.leCunUniform"> tf.initializers.leCunUniform</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L196-L198" target=_blank>Source</a> </span> </div> <div class="documentation"><p>LeCun uniform initializer.</p> <p>It draws samples from a uniform distribution in the interval <code>[-limit, limit]</code> with <code>limit = sqrt(3 / fanIn)</code>, where <code>fanIn</code> is the number of input units in the weight tensor.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> 
<div class="symbol-header"> <a class="symbol-link" name="initializers.ones" href="#initializers.ones"> tf.initializers.ones</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L27-L29" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates tensors initialized to 1.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.orthogonal" href="#initializers.orthogonal"> tf.initializers.orthogonal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L208-L210" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates a random orthogonal matrix.</p> <p>Reference: <a target="_blank" rel="noopener" href="http://arxiv.org/abs/1312.6120">Saxe et al., http://arxiv.org/abs/1312.6120</a></p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">gain</span> <span class="param-type">(number)</span> <span class="param-docs">Multiplicative factor to apply to the orthogonal matrix. Defaults to 1.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.randomNormal" href="#initializers.randomNormal"> tf.initializers.randomNormal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L59-L61" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates random values initialized to a normal distribution.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">mean</span> <span class="param-type">(number)</span> <span class="param-docs">Mean of the random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">stddev</span> <span class="param-type">(number)</span> <span class="param-docs">Standard deviation of the random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Used to seed the random generator.</span> </li> </ul> </div> <div class="returns"> <span 
class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.randomUniform" href="#initializers.randomUniform"> tf.initializers.randomUniform</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L49-L51" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates random values initialized to a uniform distribution.</p> <p>Values will be distributed uniformly between the configured minval and maxval.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">minval</span> <span class="param-type">(number)</span> <span class="param-docs">Lower bound of the range of random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">maxval</span> <span class="param-type">(number)</span> <span class="param-docs">Upper bound of the range of random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Used to seed the random generator.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.truncatedNormal" href="#initializers.truncatedNormal"> tf.initializers.truncatedNormal</a> <span class="signature">(args)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L73-L75" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates random values initialized to a truncated normal distribution.</p> <p>These values are similar to values from a <code>RandomNormal</code> except that values more than two standard deviations from the mean are discarded and re-drawn. 
This is the recommended initializer for neural network weights and filters.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">mean</span> <span class="param-type">(number)</span> <span class="param-docs">Mean of the random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">stddev</span> <span class="param-type">(number)</span> <span class="param-docs">Standard deviation of the random values to generate.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Used to seed the random generator.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.varianceScaling" href="#initializers.varianceScaling"> tf.initializers.varianceScaling</a> <span class="signature">(config)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L100-L102" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer capable of adapting its scale to the shape of weights. With distribution=NORMAL, samples are drawn from a truncated normal distribution centered on zero, with <code>stddev = sqrt(scale / n)</code> where n is:</p> <ul> <li>number of input units in the weight tensor, if mode = FAN_IN.</li> <li>number of output units, if mode = FAN_OUT.</li> <li>average of the numbers of input and output units, if mode = FAN_AVG. 
</li> </ul> <p>With distribution=UNIFORM, samples are drawn from a uniform distribution within [-limit, limit], with <code>limit = sqrt(3 * scale / n)</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">scale</span> <span class="param-type">(number)</span> <span class="param-docs">Scaling factor (positive float).</span> </li> <li class="parameter config-param"> <span class="param-name">mode</span> <span class="param-type">('fanIn'|'fanOut'|'fanAvg')</span> <span class="param-docs">Fanning mode for inputs and outputs.</span> </li> <li class="parameter config-param"> <span class="param-name">distribution</span> <span class="param-type">('normal'|'uniform'|'truncatedNormal')</span> <span class="param-docs">Probabilistic distribution of the values.</span> </li> <li class="parameter config-param"> <span class="param-name">seed</span> <span class="param-type">(number)</span> <span class="param-docs">Random number generator seed.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:initializers.Initializer">tf.initializers.Initializer</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="initializers.zeros" href="#initializers.zeros"> tf.initializers.zeros</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_initializers.ts#L18-L20" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Initializer that generates tensors initialized to 0.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Zeros</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Regularizers" href="#Regularizers" class="symbol-link">Regularizers</a> </div> <div class="description"> <p>Regularizers can be attached to various components of a Layer to add a 'scoring' function to help drive weights, or other trainable values, away from excessively large values.
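</p> <p>For example, a regularizer can be attached to a layer's kernel when the layer is constructed (a minimal sketch; the layer and the rate below are illustrative):</p> <pre class="hljs"><code class="hljs language-js">// Sketch: penalize large kernel weights with an L2 term.
const model = tf.sequential();
model.add(tf.layers.dense({
  units: 8, inputShape: [4],
  kernelRegularizer: tf.regularizers.l2({l2: 0.01})
}));
model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});
// During training, the loss now includes sum(0.01 * kernel^2) on top of the data loss.
</code></pre> <p>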
They're typically used to promote a notion that a 'simpler' model is better than a complicated model, assuming equal performance.</p> </div> </div> <div class="subheading"> <div class="title"> <a name="Regularizers-" href="#Regularizers-" class="symbol-link"> Regularizers / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="regularizers.l1" href="#regularizers.l1"> tf.regularizers.l1</a> <span class="signature">(config?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_regularizers.ts#L35-L37" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Regularizer for L1 regularization.</p> <p>Adds a term to the loss to penalize large weights: loss += sum(l1 * abs(x))</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">l1</span> <span class="param-type">(number)</span> <span class="param-docs">L1 regularization rate. Defaults to 0.01.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Regularizer</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="regularizers.l1l2" href="#regularizers.l1l2"> tf.regularizers.l1l2</a> <span class="signature">(config?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_regularizers.ts#L22-L24" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Regularizer for L1 and L2 regularization.</p> <p>Adds a term to the loss to penalize large weights: loss += sum(l1 * abs(x)) + sum(l2 * x^2)</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">l1</span> <span class="param-type">(number)</span> <span class="param-docs">L1 regularization rate. Defaults to 0.01.</span> </li> <li class="parameter config-param"> <span class="param-name">l2</span> <span class="param-type">(number)</span> <span class="param-docs">L2 regularization rate. 
Defaults to 0.01.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Regularizer</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="regularizers.l2" href="#regularizers.l2"> tf.regularizers.l2</a> <span class="signature">(config?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_regularizers.ts#L48-L50" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Regularizer for L2 regularization.</p> <p>Adds a term to the loss to penalize large weights: loss += sum(l2 * x^2)</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">config</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">l2</span> <span class="param-type">(number)</span> <span class="param-docs">L2 regularization rate. Defaults to 0.01.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Regularizer</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Data" href="#Data" class="symbol-link">Data</a> </div> <div class="description"> <p>TensorFlow.js Data provides simple APIs to load and parse data from disk or over the web in a variety of formats, and to prepare that data for use in machine learning models (e.g. via operations like filter, map, shuffle, and batch). </div> </div> <div class="subheading"> <div class="title"> <a name="Data-Creation" href="#Data-Creation" class="symbol-link"> Data / Creation </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.array" href="#data.array"> tf.data.array</a> <span class="signature">(items)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L581-L584" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create a <code>Dataset</code> from an array of elements.</p> <p>Create a Dataset from an array of objects:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-string">&#x27;item&#x27;</span>: <span class="hljs-number">1</span>}, {<span class="hljs-string">&#x27;item&#x27;</span>: <span class="hljs-number">2</span>}, {<span class="hljs-string">&#x27;item&#x27;</span>: <span class="hljs-number">3</span>}]); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> <p>Create a Dataset from an array of numbers:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); <span 
class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">items</span> <span class="param-type">(tf.void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string}[])</span> <span class="param-docs">An array of elements that will be parsed as items in a dataset.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.csv" href="#data.csv"> tf.data.csv</a> <span class="signature">(source, csvConfig?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/readers.ts#L106-L109" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create a <code>CSVDataset</code> by reading and decoding CSV file(s) from provided URL or local path if it's in Node environment.</p> <p>Note: If isLabel in columnConfigs is <code>true</code> for at least one column, the element in returned <code>CSVDataset</code> will be an object of <code>{xs:features, ys:labels}</code>: xs is a dict of features key/value pairs, ys is a dict of labels key/value pairs. 
If no column is marked as label, returns a dict of features only.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> csvUrl = <span class="hljs-string">&#x27;https://storage.googleapis.com/tfjs-examples/multivariate-linear-regression/data/boston-housing-train.csv&#x27;</span>; <span class="hljs-keyword">async</span> <span class="hljs-keyword">function</span> <span class="hljs-title function_">run</span>(<span class="hljs-params"></span>) { <span class="hljs-comment">// We want to predict the column &quot;medv&quot;, which represents a median value of</span> <span class="hljs-comment">// a home (in $1000s), so we mark it as a label.</span> <span class="hljs-keyword">const</span> csvDataset = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">csv</span>( csvUrl, { <span class="hljs-attr">columnConfigs</span>: { <span class="hljs-attr">medv</span>: { <span class="hljs-attr">isLabel</span>: <span class="hljs-literal">true</span> } } }); <span class="hljs-comment">// Number of features is the number of column names minus one for the label</span> <span class="hljs-comment">// column.</span> <span class="hljs-keyword">const</span> numOfFeatures = (<span class="hljs-keyword">await</span> csvDataset.<span class="hljs-title function_">columnNames</span>()).<span class="hljs-property">length</span> - <span class="hljs-number">1</span>; <span class="hljs-comment">// Prepare the Dataset for training.</span> <span class="hljs-keyword">const</span> flattenedDataset = csvDataset .<span class="hljs-title function_">map</span>(<span class="hljs-function">(<span class="hljs-params">{xs, ys}</span>) =&gt;</span> { <span class="hljs-comment">// Convert xs(features) and ys(labels) from object form (keyed by</span> <span class="hljs-comment">// column name) to array form.</span> <span class="hljs-keyword">return</span> {<span class="hljs-attr">xs</span>:<span class="hljs-title class_">Object</span>.<span class="hljs-title function_">values</span>(xs), <span class="hljs-attr">ys</span>:<span class="hljs-title class_">Object</span>.<span class="hljs-title function_">values</span>(ys)}; }) .<span class="hljs-title function_">batch</span>(<span class="hljs-number">10</span>); <span class="hljs-comment">// Define the model.</span> <span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({ <span class="hljs-attr">inputShape</span>: [numOfFeatures], <span class="hljs-attr">units</span>: <span class="hljs-number">1</span> })); model.<span class="hljs-title function_">compile</span>({ <span class="hljs-attr">optimizer</span>: tf.<span class="hljs-property">train</span>.<span class="hljs-title function_">sgd</span>(<span class="hljs-number">0.000001</span>), <span class="hljs-attr">loss</span>: <span class="hljs-string">&#x27;meanSquaredError&#x27;</span> }); <span class="hljs-comment">// Fit the model using the prepared Dataset</span> <span class="hljs-keyword">return</span> model.<span class="hljs-title function_">fitDataset</span>(flattenedDataset, { <span class="hljs-attr">epochs</span>: <span class="hljs-number">10</span>, <span class="hljs-attr">callbacks</span>: { <span class="hljs-attr">onEpochEnd</span>: <span class="hljs-keyword">async</span> (epoch, logs) =&gt; { <span class="hljs-variable language_">console</span>.<span class="hljs-title 
function_">log</span>(epoch + <span class="hljs-string">&#x27;:&#x27;</span> + logs.<span class="hljs-property">loss</span>); } } }); } <span class="hljs-keyword">await</span> <span class="hljs-title function_">run</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">source</span> <span class="param-type">(RequestInfo)</span> <span class="param-docs">URL or local path to get CSV file. If it's a local path, it must have prefix <code>file://</code> and it only works in node environment.</span> </li> <li class="parameter"> <span class="param-name">csvConfig</span> <span class="param-type">(Object)</span> <span class="param-docs">(Optional) A CSVConfig object that contains configurations of reading and decoding from CSV file(s).</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">hasHeader</span> <span class="param-type">(boolean)</span> <span class="param-docs">A boolean value that indicates whether the first row of provided CSV file is a header line with column names, and should not be included in the data.</span> </li> <li class="parameter config-param"> <span class="param-name">columnNames</span> <span class="param-type">(string[])</span> <span class="param-docs">A list of strings that corresponds to the CSV column names, in order. If provided, it ignores the column names inferred from the header row. If not provided, infers the column names from the first row of the records. If <code>hasHeader</code> is false and <code>columnNames</code> is not provided, this method will throw an error.</span> </li> <li class="parameter config-param"> <span class="param-name">columnConfigs</span> <span class="param-type">({[key: string]: ColumnConfig})</span> <span class="param-docs">A dictionary whose key is column names, value is an object stating if this column is required, column's data type, default value, and if this column is label. If provided, keys must correspond to names provided in <code>columnNames</code> or inferred from the file header lines. If any column is marked as label, the .csv() API will return an array of two items: the first item is a dict of features key/value pairs, the second item is a dict of labels key/value pairs. If no column is marked as label returns a dict of features only.</p> <p>Has the following fields:</p> <ul> <li> <p><code>required</code> If value in this column is required. If set to <code>true</code>, throw an error when it finds an empty value.</p> </li> <li> <p><code>dtype</code> Data type of this column. Could be int32, float32, bool, or string.</p> </li> <li> <p><code>default</code> Default value of this column.</p> </li> <li> <p><code>isLabel</code> Whether this column is label instead of features. If isLabel is <code>true</code> for at least one column, the element in returned <code>CSVDataset</code> will be an object of {xs: features, ys: labels}: xs is a dict of features key/value pairs, ys is a dict of labels key/value pairs. 
If no column is marked as label, returns a dict of features only.</p> </li> </ul> </span> </li> <li class="parameter config-param"> <span class="param-name">configuredColumnsOnly</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, only columns provided in <code>columnConfigs</code> will be parsed and provided during iteration.</span> </li> <li class="parameter config-param"> <span class="param-name">delimiter</span> <span class="param-type">(string)</span> <span class="param-docs">The string used to separate fields when parsing each line of the input file.</span> </li> <li class="parameter config-param"> <span class="param-name">delimWhitespace</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, the delimiter field should be null; whitespace is then used as the delimiter, and consecutive whitespace characters are treated as a single delimiter.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.CSVDataset">tf.data.CSVDataset</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.generator" href="#data.generator"> tf.data.generator</a> <span class="signature">(generator)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/readers.ts#L199-L206" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create a <code>Dataset</code> that produces each element from a provided JavaScript generator, which is a function that returns a (potentially async) iterator.</p> <p>For more information on iterators and generators, see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators .
For the iterator protocol, see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols .</p> <p>Example of creating a dataset from an iterator factory:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">function</span> <span class="hljs-title function_">makeIterator</span>(<span class="hljs-params"></span>) { <span class="hljs-keyword">const</span> numElements = <span class="hljs-number">10</span>; <span class="hljs-keyword">let</span> index = <span class="hljs-number">0</span>; <span class="hljs-keyword">const</span> iterator = { <span class="hljs-attr">next</span>: <span class="hljs-function">() =&gt;</span> { <span class="hljs-keyword">let</span> result; <span class="hljs-keyword">if</span> (index &lt; numElements) { result = {<span class="hljs-attr">value</span>: index, <span class="hljs-attr">done</span>: <span class="hljs-literal">false</span>}; index++; <span class="hljs-keyword">return</span> result; } <span class="hljs-keyword">return</span> {<span class="hljs-attr">value</span>: index, <span class="hljs-attr">done</span>: <span class="hljs-literal">true</span>}; } }; <span class="hljs-keyword">return</span> iterator; } <span class="hljs-keyword">const</span> ds = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">generator</span>(makeIterator); <span class="hljs-keyword">await</span> ds.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> <p>Example of creating a dataset from a generator:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">function</span>* <span class="hljs-title function_">dataGenerator</span>(<span class="hljs-params"></span>) { <span class="hljs-keyword">const</span> numElements = <span class="hljs-number">10</span>; <span class="hljs-keyword">let</span> index = <span class="hljs-number">0</span>; <span class="hljs-keyword">while</span> (index &lt; numElements) { <span class="hljs-keyword">const</span> x = index; index++; <span class="hljs-keyword">yield</span> x; } } <span class="hljs-keyword">const</span> ds = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">generator</span>(dataGenerator); <span class="hljs-keyword">await</span> ds.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">generator</span> <span class="param-type">(() =&gt; Iterator | Promise&lt;Iterator&lt;void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string}&gt;&gt; | AsyncIterator&lt;void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a 
href="#class:Tensor">tf.Tensor</a>|number|string}&gt;)</span> <span class="param-docs">A JavaScript function that returns a (potentially async) JavaScript iterator.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.microphone" href="#data.microphone"> tf.data.microphone</a> <span class="signature">(microphoneConfig?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/readers.ts#L278-L281" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create an iterator that generates frequency-domain spectrogram <code>Tensor</code>s from microphone audio stream with browser's native FFT. This API only works in browser environment when the device has microphone.</p> <p>Note: this code snippet only works when the device has a microphone. It will request permission to open the microphone when running.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> mic = <span class="hljs-keyword">await</span> tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">microphone</span>({ <span class="hljs-attr">fftSize</span>: <span class="hljs-number">1024</span>, <span class="hljs-attr">columnTruncateLength</span>: <span class="hljs-number">232</span>, <span class="hljs-attr">numFramesPerSpectrogram</span>: <span class="hljs-number">43</span>, <span class="hljs-attr">sampleRateHz</span>:<span class="hljs-number">44100</span>, <span class="hljs-attr">includeSpectrogram</span>: <span class="hljs-literal">true</span>, <span class="hljs-attr">includeWaveform</span>: <span class="hljs-literal">true</span> }); <span class="hljs-keyword">const</span> audioData = <span class="hljs-keyword">await</span> mic.<span class="hljs-title function_">capture</span>(); <span class="hljs-keyword">const</span> spectrogramTensor = audioData.<span class="hljs-property">spectrogram</span>; spectrogramTensor.<span class="hljs-title function_">print</span>(); <span class="hljs-keyword">const</span> waveformTensor = audioData.<span class="hljs-property">waveform</span>; waveformTensor.<span class="hljs-title function_">print</span>(); mic.<span class="hljs-title function_">stop</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">microphoneConfig</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>MicrophoneConfig</code> object that contains configurations of reading audio data from microphone.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">sampleRateHz</span> <span class="param-type">(44100|48000)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">fftSize</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">columnTruncateLength</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">numFramesPerSpectrogram</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter 
config-param"> <span class="param-name">audioTrackConstraints</span> <span class="param-type">(MediaTrackConstraints)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">smoothingTimeConstant</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">includeSpectrogram</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> </li> <li class="parameter config-param"> <span class="param-name">includeWaveform</span> <span class="param-type">(boolean)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;MicrophoneIterator&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.webcam" href="#data.webcam"> tf.data.webcam</a> <span class="signature">(webcamVideoElement?, webcamConfig?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/readers.ts#L238-L242" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create an iterator that generates <code>Tensor</code>s from webcam video stream. This API only works in Browser environment when the device has webcam.</p> <p>Note: this code snippet only works when the device has a webcam. It will request permission to open the webcam when running.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> videoElement = <span class="hljs-variable language_">document</span>.<span class="hljs-title function_">createElement</span>(<span class="hljs-string">&#x27;video&#x27;</span>); videoElement.<span class="hljs-property">width</span> = <span class="hljs-number">100</span>; videoElement.<span class="hljs-property">height</span> = <span class="hljs-number">100</span>; <span class="hljs-keyword">const</span> cam = <span class="hljs-keyword">await</span> tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">webcam</span>(videoElement); <span class="hljs-keyword">const</span> img = <span class="hljs-keyword">await</span> cam.<span class="hljs-title function_">capture</span>(); img.<span class="hljs-title function_">print</span>(); cam.<span class="hljs-title function_">stop</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">webcamVideoElement</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement">HTMLVideoElement</a>)</span> <span class="param-docs">A <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement">HTMLVideoElement</a> used to play video from webcam. If this element is not provided, a hidden <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement">HTMLVideoElement</a> will be created. 
In that case, <code>resizeWidth</code> and <code>resizeHeight</code> must be provided to set the generated tensor shape.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">webcamConfig</span> <span class="param-type">(Object)</span> <span class="param-docs">A <code>WebcamConfig</code> object that contains configurations of reading and manipulating data from webcam video stream.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">facingMode</span> <span class="param-type">('user'|'environment')</span> <span class="param-docs">A string specifying which camera to use on device. If the value is 'user', it will use front camera. If the value is 'environment', it will use rear camera.</span> </li> <li class="parameter config-param"> <span class="param-name">deviceId</span> <span class="param-type">(string)</span> <span class="param-docs">A string used to request a specific camera. The deviceId can be obtained by calling <code>mediaDevices.enumerateDevices()</code>.</span> </li> <li class="parameter config-param"> <span class="param-name">resizeWidth</span> <span class="param-type">(number)</span> <span class="param-docs">Specifies the width of the output tensor. The actual width of the HTMLVideoElement (if provided) can be different and the final image will be resized to match resizeWidth.</span> </li> <li class="parameter config-param"> <span class="param-name">resizeHeight</span> <span class="param-type">(number)</span> <span class="param-docs">Specifies the height of the output tensor. The actual height of the HTMLVideoElement (if provided) can be different and the final image will be resized to match resizeHeight.</span> </li> <li class="parameter config-param"> <span class="param-name">centerCrop</span> <span class="param-type">(boolean)</span> <span class="param-docs">A boolean value that indicates whether to crop the video frame from center. If true, <code>resizeWidth</code> and <code>resizeHeight</code> must be specified; then an image of size <code>[resizeWidth, resizeHeight]</code> is taken from the center of the frame without scaling. If false, the entire image is returned (perhaps scaled to fit in <code>[resizeWidth, resizeHeight]</code>, if those are provided).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;WebcamIterator&gt;</span> </div> </div> <div class="subheading"> <div class="title"> <a name="Data-Operations" href="#Data-Operations" class="symbol-link"> Data / Operations </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="data.zip" href="#data.zip"> tf.data.zip</a> <span class="signature">(datasets)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L627-L659" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Create a <code>Dataset</code> by zipping together an array, dict, or nested structure of <code>Dataset</code>s (and perhaps additional constants). 
The underlying datasets must provide elements in a consistent order such that they correspond.</p> <p>The number of elements in the resulting dataset is the same as the size of the smallest dataset in datasets.</p> <p>The nested structure of the <code>datasets</code> argument determines the structure of elements in the resulting iterator.</p> <p>Note this means that, given an array of two datasets that produce dict elements, the result is a dataset that produces elements that are arrays of two dicts:</p> <p>Zip an array of datasets:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Zip two datasets of objects:&#x27;</span>); <span class="hljs-keyword">const</span> ds1 = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-attr">a</span>: <span class="hljs-number">1</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">2</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">3</span>}]); <span class="hljs-keyword">const</span> ds2 = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-attr">b</span>: <span class="hljs-number">4</span>}, {<span class="hljs-attr">b</span>: <span class="hljs-number">5</span>}, {<span class="hljs-attr">b</span>: <span class="hljs-number">6</span>}]); <span class="hljs-keyword">const</span> ds3 = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">zip</span>([ds1, ds2]); <span class="hljs-keyword">await</span> ds3.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(e))); <span class="hljs-comment">// If the goal is to merge the dicts in order to produce elements like</span> <span class="hljs-comment">// {a: ..., b: ...}, this requires a second step such as:</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;Merge the objects:&#x27;</span>); <span class="hljs-keyword">const</span> ds4 = ds3.<span class="hljs-title function_">map</span>(<span class="hljs-function"><span class="hljs-params">x</span> =&gt;</span> {<span class="hljs-keyword">return</span> {<span class="hljs-attr">a</span>: x[<span class="hljs-number">0</span>].<span class="hljs-property">a</span>, <span class="hljs-attr">b</span>: x[<span class="hljs-number">1</span>].<span class="hljs-property">b</span>}}); <span class="hljs-keyword">await</span> ds4.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> <p>Zip a dict of datasets:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-attr">a</span>: <span class="hljs-number">1</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">2</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">3</span>}]); <span 
class="hljs-keyword">const</span> b = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-attr">b</span>: <span class="hljs-number">4</span>}, {<span class="hljs-attr">b</span>: <span class="hljs-number">5</span>}, {<span class="hljs-attr">b</span>: <span class="hljs-number">6</span>}]); <span class="hljs-keyword">const</span> c = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">zip</span>({<span class="hljs-attr">c</span>: a, <span class="hljs-attr">d</span>: b}); <span class="hljs-keyword">await</span> c.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-title class_">JSON</span>.<span class="hljs-title function_">stringify</span>(e))); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">datasets</span> <span class="param-type">(DatasetContainer)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="subheading"> <div class="title"> <a name="Data-Classes" href="#Data-Classes" class="symbol-link"> Data / Classes </a> </div> <div class="description"> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:data.CSVDataset" href="#class:data.CSVDataset">tf.data.CSVDataset</a> <span class="signature"> <span>extends <a href="#class:data.Dataset">tf.data.Dataset</a></span> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/datasets/csv_dataset.ts#L46-L392" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Represents a potentially large collection of delimited text records.</p> <p>The produced <code>TensorContainer</code>s each contain one key-value pair for every column of the table. When a field is empty in the incoming data, the resulting value is <code>undefined</code>, or throw error if it is required. Values that can be parsed as numbers are emitted as type <code>number</code>, other values are parsed as <code>string</code>.</p> <p>The results are not batched.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.CSVDataset.columnNames" href="#tf.data.CSVDataset.columnNames"> columnNames</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/datasets/csv_dataset.ts#L66-L72" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns column names of the csv dataset. If <code>configuredColumnsOnly</code> is true, return column names in <code>columnConfigs</code>. If <code>configuredColumnsOnly</code> is false and <code>columnNames</code> is provided, <code>columnNames</code>. If <code>configuredColumnsOnly</code> is false and <code>columnNames</code> is not provided, return all column names parsed from the csv file. 
For example usage please go to <a href="#data.csv">tf.data.csv()</a>.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;string[]&gt;</span> </div> </div> </div> </div> <div class="symbol class"> <div class="symbol-header"> <a class="symbol-link" name="class:data.Dataset" href="#class:data.Dataset">tf.data.Dataset</a> <span class="signature"> </span> <span class="chip">class</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L62-L532" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Represents a potentially large list of independent data elements (typically 'samples' or 'examples').</p> <p>A 'data example' may be a primitive, an array, a map from string keys to values, or any nested structure of these.</p> <p>A <code>Dataset</code> represents an ordered collection of elements, together with a chain of transformations to be performed on those elements. Each transformation is a method of <code>Dataset</code> that returns another <code>Dataset</code>, so these may be chained, e.g. <code>const processedDataset = rawDataset.filter(...).map(...).batch(...)</code>.</p> <p>Data loading and transformation is done in a lazy, streaming fashion. The dataset may be iterated over multiple times; each iteration starts the data loading anew and recapitulates the transformations.</p> <p>A <code>Dataset</code> is typically processed as a stream of unbatched examples -- i.e., its transformations are applied one example at a time. Batching produces a new <code>Dataset</code> where each element is a batch. Batching should usually come last in a pipeline, because data transformations are easier to express on a per-example basis than on a per-batch basis.</p> <p>The following code examples are calling <code>await dataset.forEachAsync(...)</code> to iterate once over the entire dataset in order to print out the data.</p> </div> <div class="method-list"> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.batch" href="#tf.data.Dataset.batch"> batch</a> <span class="signature">(batchSize, smallLastBatch?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L136-L159" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Groups elements into batches.</p> <p>It is assumed that each of the incoming dataset elements has the same structure -- i.e. the same set of keys at each location in an object hierarchy. For each key, the resulting <code>Dataset</code> provides a batched element collecting all of the incoming values for that key.</p> <p>Incoming primitives are grouped into a 1-D Tensor. Incoming Tensors are grouped into a new Tensor where the 0th axis is the batch dimension. Incoming arrays are converted to Tensor and then batched. A nested array is interpreted as an n-D Tensor, so the batched result has n+1 dimensions. 
An array that cannot be converted to Tensor produces an error.</p> <p>If an array should not be batched as a unit, it should first be converted to an object with integer keys.</p> <p>Here are a few examples:</p> <p>Batch a dataset of numbers:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>]).<span class="hljs-title function_">batch</span>(<span class="hljs-number">4</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> e.<span class="hljs-title function_">print</span>()); </code></pre> <p>Batch a dataset of arrays:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> b = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([[<span class="hljs-number">1</span>], [<span class="hljs-number">2</span>], [<span class="hljs-number">3</span>], [<span class="hljs-number">4</span>], [<span class="hljs-number">5</span>], [<span class="hljs-number">6</span>], [<span class="hljs-number">7</span>], [<span class="hljs-number">8</span>]]).<span class="hljs-title function_">batch</span>(<span class="hljs-number">4</span>); <span class="hljs-keyword">await</span> b.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> e.<span class="hljs-title function_">print</span>()); </code></pre> <p>Batch a dataset of objects:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> c = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([{<span class="hljs-attr">a</span>: <span class="hljs-number">1</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">11</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">2</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">12</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">3</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">13</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">4</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">14</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">5</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">15</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">6</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">16</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">7</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">17</span>}, {<span class="hljs-attr">a</span>: <span class="hljs-number">8</span>, <span class="hljs-attr">b</span>: <span class="hljs-number">18</span>}]).<span class="hljs-title function_">batch</span>(<span class="hljs-number">4</span>); <span class="hljs-keyword">await</span> c.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> { <span class="hljs-variable 
language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;{&#x27;</span>); <span class="hljs-keyword">for</span>(<span class="hljs-keyword">var</span> key <span class="hljs-keyword">in</span> e) { <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(key+<span class="hljs-string">&#x27;:&#x27;</span>); e[key].<span class="hljs-title function_">print</span>(); } <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-string">&#x27;}&#x27;</span>); }) </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">batchSize</span> <span class="param-type">(number)</span> <span class="param-docs">The number of elements desired per batch.</span> </li> <li class="parameter"> <span class="param-name">smallLastBatch</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to emit the final batch when it has fewer than batchSize elements. Default true.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.concatenate" href="#tf.data.Dataset.concatenate"> concatenate</a> <span class="signature">(dataset)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L176-L196" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Concatenates this <code>Dataset</code> with another.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> b = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); <span class="hljs-keyword">const</span> c = a.<span class="hljs-title function_">concatenate</span>(b); <span class="hljs-keyword">await</span> c.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">dataset</span> <span class="param-type">(<a href="#class:data.Dataset">tf.data.Dataset</a>)</span> <span class="param-docs">A <code>Dataset</code> to be concatenated onto this one.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.filter" href="#tf.data.Dataset.filter"> filter</a> <span class="signature">(predicate)</span> <span class="chip">method</span> <span class="source-link"> <a 
href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L214-L228" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Filters this dataset according to <code>predicate</code>.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">9</span>, <span class="hljs-number">10</span>]) .<span class="hljs-title function_">filter</span>(<span class="hljs-function"><span class="hljs-params">x</span> =&gt;</span> x%<span class="hljs-number">2</span> === <span class="hljs-number">0</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">predicate</span> <span class="param-type">((value: T) =&gt; boolean)</span> <span class="param-docs">A function mapping a dataset element to a boolean or a <code>Promise</code> for one.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.forEachAsync" href="#tf.data.Dataset.forEachAsync"> forEachAsync</a> <span class="signature">(f)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L246-L248" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Apply a function to every element of the dataset.</p> <p>After the function is applied to a dataset element, any Tensors contained within that element are disposed.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">f</span> <span class="param-type">((input: T) =&gt; void)</span> <span class="param-docs">A function to apply to each dataset element.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;void&gt;</span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.map" href="#tf.data.Dataset.map"> map</a> <span 
class="signature">(transform)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L265-L270" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Maps this dataset through a 1-to-1 transform.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]).<span class="hljs-title function_">map</span>(<span class="hljs-function"><span class="hljs-params">x</span> =&gt;</span> x*x); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">transform</span> <span class="param-type">((value: T) =&gt; tf.void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string})</span> <span class="param-docs">A function mapping a dataset element to a transformed dataset element.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.mapAsync" href="#tf.data.Dataset.mapAsync"> mapAsync</a> <span class="signature">(transform)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L295-L301" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Maps this dataset through an async 1-to-1 transform.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]).<span class="hljs-title function_">mapAsync</span>(<span class="hljs-function"><span class="hljs-params">x</span> =&gt;</span> <span class="hljs-keyword">new</span> <span class="hljs-title class_">Promise</span>(<span class="hljs-keyword">function</span>(<span class="hljs-params">resolve</span>){ <span class="hljs-built_in">setTimeout</span>(<span class="hljs-function">() =&gt;</span> { <span class="hljs-title function_">resolve</span>(x * x); }, <span class="hljs-title class_">Math</span>.<span class="hljs-title function_">random</span>()*<span class="hljs-number">1000</span> + <span class="hljs-number">500</span>); })); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-keyword">await</span> a.<span class="hljs-title function_">toArray</span>()); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li 
class="parameter"> <span class="param-name">transform</span> <span class="param-type">((value: T) =&gt; Promise&lt;tf.void|number|string|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|<a href="#class:Tensor">tf.Tensor</a>|<a href="#class:Tensor">tf.Tensor</a>[]|{[key: string]:<a href="#class:Tensor">tf.Tensor</a>|number|string}&gt;)</span> <span class="param-docs">A function mapping a dataset element to a <code>Promise</code> for a transformed dataset element. This transform is responsible for disposing any intermediate <code>Tensor</code>s, i.e. by wrapping its computation in <code>tf.tidy()</code>; that cannot be automated here (as it is in the synchronous <code>map()</code> case).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.prefetch" href="#tf.data.Dataset.prefetch"> prefetch</a> <span class="signature">(bufferSize)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L312-L321" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <code>Dataset</code> that prefetches elements from this dataset.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">bufferSize</span> <span class="param-type">(number)</span> <span class="param-docs">: An integer specifying the number of elements to be prefetched.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.repeat" href="#tf.data.Dataset.repeat"> repeat</a> <span class="signature">(count?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L341-L365" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Repeats this dataset <code>count</code> times.</p> <p>NOTE: If this dataset is a function of global state (e.g. 
a random number generator), then different repetitions may produce different elements.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>]).<span class="hljs-title function_">repeat</span>(<span class="hljs-number">3</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">count</span> <span class="param-type">(number)</span> <span class="param-docs">: (Optional) An integer, representing the number of times the dataset should be repeated. The default behavior (if <code>count</code> is <code>undefined</code> or negative) is for the dataset be repeated indefinitely.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.skip" href="#tf.data.Dataset.skip"> skip</a> <span class="signature">(count)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L384-L404" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <code>Dataset</code> that skips <code>count</code> initial elements from this dataset.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]).<span class="hljs-title function_">skip</span>(<span class="hljs-number">3</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">count</span> <span class="param-type">(number)</span> <span class="param-docs">: The number of elements of this dataset that should be skipped to form the new dataset. If <code>count</code> is greater than the size of this dataset, the new dataset will contain no elements. 
If <code>count</code> is <code>undefined</code> or negative, skips the entire dataset.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.shuffle" href="#tf.data.Dataset.shuffle"> shuffle</a> <span class="signature">(bufferSize, seed?, reshuffleEachIteration?)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L431-L454" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Pseudorandomly shuffles the elements of this dataset. This is done in a streaming manner, by sampling from a given number of prefetched elements.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]).<span class="hljs-title function_">shuffle</span>(<span class="hljs-number">3</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">bufferSize</span> <span class="param-type">(number)</span> <span class="param-docs">: An integer specifying the number of elements from this dataset from which the new dataset will sample.</span> </li> <li class="parameter"> <span class="param-name">seed</span> <span class="param-type">(string)</span> <span class="param-docs">: (Optional) An integer specifying the random seed that will be used to create the distribution.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">reshuffleEachIteration</span> <span class="param-type">(boolean)</span> <span class="param-docs">: (Optional) A boolean, which if true indicates that the dataset should be pseudorandomly reshuffled each time it is iterated over. If false, elements will be returned in the same shuffled order on each iteration. 
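For example, a minimal sketch (the seed string '42' is purely illustrative) that produces the same shuffled order on every pass:
<pre class="hljs"><code class="hljs language-js">// A fixed seed together with reshuffleEachIteration = false means
// both iterations below print the elements in the same shuffled order.
const a = tf.data.array([1, 2, 3, 4, 5, 6]).shuffle(3, '42', false);
await a.forEachAsync(e =&gt; console.log(e));
await a.forEachAsync(e =&gt; console.log(e));
</code></pre>
Whether the order changes between iterations is controlled by this flag.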
(Defaults to <code>true</code>.)</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.take" href="#tf.data.Dataset.take"> take</a> <span class="signature">(count)</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L473-L490" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <code>Dataset</code> with at most <code>count</code> initial elements from this dataset.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]).<span class="hljs-title function_">take</span>(<span class="hljs-number">3</span>); <span class="hljs-keyword">await</span> a.<span class="hljs-title function_">forEachAsync</span>(<span class="hljs-function"><span class="hljs-params">e</span> =&gt;</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(e)); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">count</span> <span class="param-type">(number)</span> <span class="param-docs">: The number of elements of this dataset that should be taken to form the new dataset. If <code>count</code> is <code>undefined</code> or negative, or if <code>count</code> is greater than the size of this dataset, the new dataset will contain all elements of this dataset.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:data.Dataset">tf.data.Dataset</a></span> </div> </div> <div class="symbol function method"> <div class="symbol-header"> <a class="symbol-link" name="tf.data.Dataset.toArray" href="#tf.data.Dataset.toArray"> toArray</a> <span class="signature">()</span> <span class="chip">method</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-data/src/dataset.ts#L508-L513" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Collect all elements of this dataset into an array.</p> <p>Obviously this will succeed only for small datasets that fit in memory. 
Useful for testing and generally should be avoided if possible.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = tf.<span class="hljs-property">data</span>.<span class="hljs-title function_">array</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>, <span class="hljs-number">6</span>]); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(<span class="hljs-keyword">await</span> a.<span class="hljs-title function_">toArray</span>()); </code></pre> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;T[]&gt;</span> </div> </div> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Visualization" href="#Visualization" class="symbol-link">Visualization</a> </div> <div class="description"> <p>tfjs-vis is a companion library for TensorFlow.js that provides in-browser visualization capabilities for training and understanding models. <a href='/api_vis/latest/'>API docs for tfjs-vis are available here</a> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Util" href="#Util" class="symbol-link">Util</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Util-" href="#Util-" class="symbol-link"> Util / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.assert" href="#util.assert"> tf.util.assert</a> <span class="signature">(expr, msg)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util_base.ts#L151-L155" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Asserts that the expression is true. Otherwise throws an error with the provided message.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = <span class="hljs-number">2</span>; tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">assert</span>(x === <span class="hljs-number">2</span>, <span class="hljs-string">&#x27;x is not 2&#x27;</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">expr</span> <span class="param-type">(boolean)</span> <span class="param-docs">The expression to assert (as a boolean).</span> </li> <li class="parameter"> <span class="param-name">msg</span> <span class="param-type">(() =&gt; string)</span> <span class="param-docs">A function that returns the message to report when throwing an error. 
We use a function for performance reasons.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.createShuffledIndices" href="#util.createShuffledIndices"> tf.util.createShuffledIndices</a> <span class="signature">(n)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util_base.ts#L274-L281" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a new array with randomized indices to a given quantity.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> randomTen = tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">createShuffledIndices</span>(<span class="hljs-number">10</span>); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(randomTen); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">n</span> <span class="param-type">(number)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Uint32Array</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="decodeString" href="#decodeString"> tf.decodeString</a> <span class="signature">(bytes, encoding?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util.ts#L131-L134" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Decodes the provided bytes into a string using the provided encoding scheme.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">bytes</span> <span class="param-type">(Uint8Array)</span> <span class="param-docs">The bytes to decode.</span> </li> <li class="parameter"> <span class="param-name">encoding</span> <span class="param-type">(string)</span> <span class="param-docs">The encoding scheme. Defaults to utf-8.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">string</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="encodeString" href="#encodeString"> tf.encodeString</a> <span class="signature">(s, encoding?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util.ts#L118-L121" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Encodes the provided string into bytes using the provided encoding scheme.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">s</span> <span class="param-type">(string)</span> <span class="param-docs">The string to encode.</span> </li> <li class="parameter"> <span class="param-name">encoding</span> <span class="param-type">(string)</span> <span class="param-docs">The encoding scheme. 
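For example, a small round-trip sketch using the top-level names listed here (the default scheme is assumed):
<pre class="hljs"><code class="hljs language-js">// Encode a string to utf-8 bytes and decode it back.
const bytes = tf.encodeString('Hello, TensorFlow.js');
console.log(bytes);                  // Uint8Array of utf-8 bytes
console.log(tf.decodeString(bytes)); // 'Hello, TensorFlow.js'
</code></pre>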
Defaults to utf-8.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Uint8Array</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="fetch" href="#fetch"> tf.fetch</a> <span class="signature">(path, requestInits?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util.ts#L105-L108" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a platform-specific implementation of <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API"><code>fetch</code></a>.</p> <p>If <code>fetch</code> is defined on the global object (<code>window</code>, <code>process</code>, etc.), <code>tf.util.fetch</code> returns that function.</p> <p>If not, <code>tf.util.fetch</code> returns a platform-specific solution.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> resource = <span class="hljs-keyword">await</span> tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">fetch</span>(<span class="hljs-string">&#x27;https://cdn.jsdelivr.net/npm/@tensorflow/tfjs&#x27;</span>); <span class="hljs-comment">// handle response</span> </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">path</span> <span class="param-type">(string)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">requestInits</span> <span class="param-type">(RequestInit)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;Response&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.flatten" href="#util.flatten"> tf.util.flatten</a> <span class="signature">(arr, result?, skipTypedArray?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util.ts#L165-L193" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Flattens an arbitrarily nested array.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = [[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">5</span>, [<span class="hljs-number">6</span>, [<span class="hljs-number">7</span>]]]]; <span class="hljs-keyword">const</span> flat = tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">flatten</span>(a); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(flat); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">arr</span> <span class="param-type">(number|boolean|string|Promise&lt;number&gt;|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|RecursiveArray|<a target="_blank" rel="noopener" 
href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>&gt;)</span> <span class="param-docs">The nested array to flatten.</span> </li> <li class="parameter"> <span class="param-name">result</span> <span class="param-type">(number|boolean|string|Promise&lt;number&gt;|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>[])</span> <span class="param-docs">The destination array which holds the elements.</span> <span class="chip"> Optional </span> </li> <li class="parameter"> <span class="param-name">skipTypedArray</span> <span class="param-type">(boolean)</span> <span class="param-docs">If true, avoids flattening the typed arrays. Defaults to false.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">number|boolean|string|Promise&lt;number&gt;|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>[]</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.now" href="#util.now"> tf.util.now</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util.ts#L85-L87" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the current high-resolution time in milliseconds relative to an arbitrary time in the past. It works across different platforms (node.js, browsers).</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">now</span>()); </code></pre> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">number</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.shuffle" href="#util.shuffle"> tf.util.shuffle</a> <span class="signature">(array)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util_base.ts#L34-L47" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Shuffles the array in-place using Fisher-Yates algorithm.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = [<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">5</span>]; tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">shuffle</span>(a); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(a); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">array</span> <span class="param-type">(<a href="#any">tf.any()</a>[]|Uint32Array|Int32Array| Float32Array)</span> <span class="param-docs">The array to shuffle in-place.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> 
<div class="symbol-header"> <a class="symbol-link" name="util.shuffleCombo" href="#util.shuffleCombo"> tf.util.shuffleCombo</a> <span class="signature">(array, array2)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util_base.ts#L65-L88" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Shuffles two arrays in-place the same way using Fisher-Yates algorithm.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> a = [<span class="hljs-number">1</span>,<span class="hljs-number">2</span>,<span class="hljs-number">3</span>,<span class="hljs-number">4</span>,<span class="hljs-number">5</span>]; <span class="hljs-keyword">const</span> b = [<span class="hljs-number">11</span>,<span class="hljs-number">22</span>,<span class="hljs-number">33</span>,<span class="hljs-number">44</span>,<span class="hljs-number">55</span>]; tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">shuffleCombo</span>(a, b); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(a, b); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">array</span> <span class="param-type">(<a href="#any">tf.any()</a>[]|Uint32Array|Int32Array|Float32Array)</span> <span class="param-docs">The first array to shuffle in-place.</span> </li> <li class="parameter"> <span class="param-name">array2</span> <span class="param-type">(<a href="#any">tf.any()</a>[]|Uint32Array|Int32Array|Float32Array)</span> <span class="param-docs">The second array to shuffle in-place with the same permutation as the first array.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="util.sizeFromShape" href="#util.sizeFromShape"> tf.util.sizeFromShape</a> <span class="signature">(shape)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/util_base.ts#L181-L191" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the size (number of elements) of the tensor given its shape.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> shape = [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>, <span class="hljs-number">2</span>]; <span class="hljs-keyword">const</span> size = tf.<span class="hljs-property">util</span>.<span class="hljs-title function_">sizeFromShape</span>(shape); <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(size); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">shape</span> <span class="param-type">(number[])</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">number</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Backends" href="#Backends" class="symbol-link">Backends</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Backends-" href="#Backends-" 
class="symbol-link"> Backends / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="backend" href="#backend"> tf.backend</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L372-L374" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Gets the current backend. If no backends have been initialized, this will attempt to initialize the best backend. Will throw an error if the highest priority backend has async initialization, in which case you should call 'await tf.ready()' before running other code.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">KernelBackend</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="getBackend" href="#getBackend"> tf.getBackend</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L312-L314" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns the current backend name (cpu, webgl, etc). The backend is responsible for creating tensors and executing operations on those tensors.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">string</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="ready" href="#ready"> tf.ready</a> <span class="signature">()</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L302-L304" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Returns a promise that resolves when the currently selected backend (or the highest priority one) has initialized. Await this promise when you are using a backend that has async initialization.</p> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;void&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="registerBackend" href="#registerBackend"> tf.registerBackend</a> <span class="signature">(name, factory, priority?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L358-L362" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Registers a global backend. The registration should happen when importing a module file (e.g. when importing <code>backend_webgl.ts</code>), and is used for modular builds (e.g. custom tfjs bundle with only webgl support).</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">factory</span> <span class="param-type">(() =&gt; KernelBackend | Promise&lt;KernelBackend&gt;)</span> <span class="param-docs">The backend factory function. 
When called, it should return a backend instance, or a promise of an instance.</span> </li> <li class="parameter"> <span class="param-name">priority</span> <span class="param-type">(number)</span> <span class="param-docs">The priority of the backend (higher = more important). In case multiple backends are registered, the priority is used to find the best backend. Defaults to 1.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">boolean</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="removeBackend" href="#removeBackend"> tf.removeBackend</a> <span class="signature">(name)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L321-L323" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Removes a backend and the registered factory.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">name</span> <span class="param-type">(string)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="setBackend" href="#setBackend"> tf.setBackend</a> <span class="signature">(backendName)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/globals.ts#L291-L293" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and executing operations on those tensors. Returns a promise that resolves to a boolean if the backend initialization was successful.</p> <p>Note this disposes the current backend, if any, as well as any tensors associated with it. A new backend is initialized, even if it is of the same type as the previous one.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">backendName</span> <span class="param-type">(string)</span> <span class="param-docs">The name of the backend. 
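For example, a minimal sketch that switches to the CPU backend and then reports the active backend:
<pre class="hljs"><code class="hljs language-js">// setBackend resolves to true when initialization succeeds.
const ok = await tf.setBackend('cpu');
await tf.ready();
console.log(ok, tf.getBackend()); // true 'cpu'
</code></pre>
The set of accepted names depends on which backend packages are loaded.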
Currently supports <code>'webgl'|'cpu'</code> in the browser, <code>'tensorflow'</code> under node.js (requires tfjs-node), and <code>'wasm'</code> (requires tfjs-backend-wasm).</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;boolean&gt;</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Browser" href="#Browser" class="symbol-link">Browser</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Browser-" href="#Browser-" class="symbol-link"> Browser / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="browser.draw" href="#browser.draw"> tf.browser.draw</a> <span class="signature">(image, canvas, options?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/browser.ts#L416-L434" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Draws a <a href="#class:Tensor">tf.Tensor</a> to a canvas.</p> <p>When the dtype of the input is 'float32', we assume values in the range [0-1]. Otherwise, when input is 'int32', we assume values in the range [0-255].</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">image</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">The tensor to draw on the canvas. Must match one of these shapes:</p> <ul> <li>Rank-2 with shape <code>[height, width</code>]: Drawn as grayscale.</li> <li>Rank-3 with shape <code>[height, width, 1]</code>: Drawn as grayscale.</li> <li>Rank-3 with shape <code>[height, width, 3]</code>: Drawn as RGB with alpha set in <code>imageOptions</code> (defaults to 1, which is opaque).</li> <li>Rank-3 with shape <code>[height, width, 4]</code>: Drawn as RGBA.</li> </ul> </span> </li> <li class="parameter"> <span class="param-name">canvas</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement">HTMLCanvasElement</a>)</span> <span class="param-docs">The canvas to draw to.</span> </li> <li class="parameter"> <span class="param-name">options</span> <span class="param-type">(Object)</span> <span class="param-docs">The configuration arguments for image to be drawn and the canvas to draw to.</span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">imageOptions</span> <span class="param-type">(ImageOptions)</span> <span class="param-docs">Optional. An object of options to customize the values of image tensor.</span> </li> <li class="parameter config-param"> <span class="param-name">contextOptions</span> <span class="param-type">(ContextOptions)</span> <span class="param-docs">Optional. 
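For example, a minimal usage sketch for <code>tf.browser.draw()</code> (it assumes the page allows creating a canvas and that the active backend supports drawing):
<pre class="hljs"><code class="hljs language-js">// Draw a 2x2 grayscale float32 tensor (values in [0, 1]) to a canvas.
const img = tf.tensor2d([[0, 0.5], [0.5, 1]]);
const canvas = document.createElement('canvas');
document.body.appendChild(canvas);
tf.browser.draw(img, canvas);
</code></pre>
The <code>contextOptions</code> field: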
An object to configure the context of the canvas to draw to.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">void</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="browser.fromPixels" href="#browser.fromPixels"> tf.browser.fromPixels</a> <span class="signature">(pixels, numChannels?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/browser.ts#L66-L173" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> from an image.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> image = <span class="hljs-keyword">new</span> <span class="hljs-title class_">ImageData</span>(<span class="hljs-number">1</span>, <span class="hljs-number">1</span>); image.<span class="hljs-property">data</span>[<span class="hljs-number">0</span>] = <span class="hljs-number">100</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">1</span>] = <span class="hljs-number">150</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">2</span>] = <span class="hljs-number">200</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">3</span>] = <span class="hljs-number">255</span>; tf.<span class="hljs-property">browser</span>.<span class="hljs-title function_">fromPixels</span>(image).<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">pixels</span> <span class="param-type">(PixelData|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/ImageData">ImageData</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLImageElement">HTMLImageElement</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement">HTMLCanvasElement</a>| <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement">HTMLVideoElement</a>|ImageBitmap)</span> <span class="param-docs">The input image to construct the tensor from. The supported image types are all 4-channel. You can also pass in an image object with following attributes: <code>{data: Uint8Array; width: number; height: number}</code></span> </li> <li class="parameter"> <span class="param-name">numChannels</span> <span class="param-type">(number)</span> <span class="param-docs">The number of channels of the output tensor. A numChannels value less than 4 allows you to ignore channels. 
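For example, a small variation on the snippet above that keeps all four channels:
<pre class="hljs"><code class="hljs language-js">// numChannels = 4 keeps the alpha channel, so the result has shape [1, 1, 4].
const image = new ImageData(1, 1);
image.data[0] = 100; image.data[1] = 150;
image.data[2] = 200; image.data[3] = 255;
tf.browser.fromPixels(image, 4).print();
</code></pre>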
Defaults to 3 (ignores alpha channel of input image).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor3D</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="browser.fromPixelsAsync" href="#browser.fromPixelsAsync"> tf.browser.fromPixelsAsync</a> <span class="signature">(pixels, numChannels?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/browser.ts#L227-L270" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Creates a <a href="#class:Tensor">tf.Tensor</a> from an image in async way.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> image = <span class="hljs-keyword">new</span> <span class="hljs-title class_">ImageData</span>(<span class="hljs-number">1</span>, <span class="hljs-number">1</span>); image.<span class="hljs-property">data</span>[<span class="hljs-number">0</span>] = <span class="hljs-number">100</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">1</span>] = <span class="hljs-number">150</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">2</span>] = <span class="hljs-number">200</span>; image.<span class="hljs-property">data</span>[<span class="hljs-number">3</span>] = <span class="hljs-number">255</span>; (<span class="hljs-keyword">await</span> tf.<span class="hljs-property">browser</span>.<span class="hljs-title function_">fromPixelsAsync</span>(image)).<span class="hljs-title function_">print</span>(); </code></pre> <p>This API is the async version of fromPixels. The API will first check |WRAP_TO_IMAGEBITMAP| flag, and try to wrap the input to imageBitmap if the flag is set to true.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">pixels</span> <span class="param-type">(PixelData|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/ImageData">ImageData</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLImageElement">HTMLImageElement</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement">HTMLCanvasElement</a>| <a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement">HTMLVideoElement</a>|ImageBitmap)</span> <span class="param-docs">The input image to construct the tensor from. The supported image types are all 4-channel. You can also pass in an image object with following attributes: <code>{data: Uint8Array; width: number; height: number}</code></span> </li> <li class="parameter"> <span class="param-name">numChannels</span> <span class="param-type">(number)</span> <span class="param-docs">The number of channels of the output tensor. A numChannels value less than 4 allows you to ignore channels. 
Defaults to 3 (ignores alpha channel of input image).</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;<a href="#class:Tensor">tf.Tensor</a>&gt;</span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="browser.toPixels" href="#browser.toPixels"> tf.browser.toPixels</a> <span class="signature">(img, canvas?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-core/src/ops/browser.ts#L319-L394" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Draws a <a href="#class:Tensor">tf.Tensor</a> of pixel values to a byte array or optionally a canvas.</p> <p>When the dtype of the input is 'float32', we assume values in the range [0-1]. Otherwise, when input is 'int32', we assume values in the range [0-255].</p> <p>Returns a promise that resolves when the canvas has been drawn to.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">img</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor2D</a>|<a href="#class:Tensor">tf.Tensor3D</a>|<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray">TypedArray</a>|Array)</span> <span class="param-docs">A rank-2 tensor with shape <code>[height, width]</code>, or a rank-3 tensor of shape <code>[height, width, numChannels]</code>. If rank-2, draws grayscale. If rank-3, must have depth of 1, 3 or 4. When depth of 1, draws grayscale. When depth of 3, we draw with the first three components of the depth dimension corresponding to r, g, b and alpha = 1. When depth of 4, all four components of the depth dimension correspond to r, g, b, a.</span> </li> <li class="parameter"> <span class="param-name">canvas</span> <span class="param-type">(<a target="_blank" rel="noopener" href="https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement">HTMLCanvasElement</a>)</span> <span class="param-docs">The canvas to draw to.</span> <span class="chip"> Optional </span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">Promise&lt;Uint8ClampedArray&gt;</span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Metrics" href="#Metrics" class="symbol-link">Metrics</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Metrics-" href="#Metrics-" class="symbol-link"> Metrics / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.binaryAccuracy" href="#metrics.binaryAccuracy"> tf.metrics.binaryAccuracy</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L44-L46" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Binary accuracy metric function.</p> <p><code>yTrue</code> and <code>yPred</code> can have 0-1 values. 
Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]], [<span class="hljs-number">2</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>]], [<span class="hljs-number">2</span>, <span class="hljs-number">4</span>]); <span class="hljs-keyword">const</span> accuracy = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">binaryAccuracy</span>(x, y); accuracy.<span class="hljs-title function_">print</span>(); </code></pre> <p><code>yTrue</code> and <code>yPred</code> can also have floating-number values between 0 and 1, in which case the values will be thresholded at 0.5 to yield 0-1 values (i.e., a value &gt;= 0.5 and &lt;= 1.0 is interpreted as 1).</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">0.2</span>, <span class="hljs-number">0.4</span>, <span class="hljs-number">0.6</span>, <span class="hljs-number">0.8</span>, <span class="hljs-number">0.2</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.4</span>, <span class="hljs-number">0.7</span>]); <span class="hljs-keyword">const</span> accuracy = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">binaryAccuracy</span>(x, y); accuracy.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of truth.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of prediction.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.binaryCrossentropy" href="#metrics.binaryCrossentropy"> tf.metrics.binaryCrossentropy</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a 
href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L65-L67" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Binary crossentropy metric function.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>], [<span class="hljs-number">1</span>], [<span class="hljs-number">1</span>], [<span class="hljs-number">1</span>]]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>], [<span class="hljs-number">0</span>], [<span class="hljs-number">0.5</span>], [<span class="hljs-number">1</span>]]); <span class="hljs-keyword">const</span> crossentropy = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">binaryCrossentropy</span>(x, y); crossentropy.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of truth.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of prediction, probabilities for the <code>1</code> case.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.categoricalAccuracy" href="#metrics.categoricalAccuracy"> tf.metrics.categoricalAccuracy</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L111-L113" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Categorical accuracy metric function.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>]]); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0.1</span>, <span class="hljs-number">0.8</span>, <span class="hljs-number">0.05</span>, <span class="hljs-number">0.05</span>], [<span class="hljs-number">0.1</span>, <span class="hljs-number">0.05</span>, <span class="hljs-number">0.05</span>, <span class="hljs-number">0.8</span>]]); <span class="hljs-keyword">const</span> accuracy = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">categoricalAccuracy</span>(x, y); accuracy.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a 
href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of truth: one-hot encoding of categories.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Binary Tensor of prediction: probabilities or logits for the same categories as in <code>yTrue</code>.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.categoricalCrossentropy" href="#metrics.categoricalCrossentropy"> tf.metrics.categoricalCrossentropy</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L126-L128" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Categorical crossentropy between an output tensor and a target tensor.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs"></span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs"></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.cosineProximity" href="#metrics.cosineProximity"> tf.metrics.cosineProximity</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L229-L231" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Loss or metric function: Cosine proximity.</p> <p>Mathematically, cosine proximity is defined as: <code>-sum(l2Normalize(yTrue) * l2Normalize(yPred))</code>, wherein <code>l2Normalize()</code> normalizes the L2 norm of the input to 1 and <code>*</code> represents element-wise multiplication.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>]]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span> / <span class="hljs-title class_">Math</span>.<span class="hljs-title function_">sqrt</span>(<span class="hljs-number">2</span>), <span class="hljs-number">1</span> / <span class="hljs-title class_">Math</span>.<span class="hljs-title function_">sqrt</span>(<span class="hljs-number">2</span>)], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>]]); <span class="hljs-keyword">const</span> proximity = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">cosineProximity</span>(yTrue, yPred); proximity.<span class="hljs-title function_">print</span>(); </code></pre> </div> 
<div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Truth Tensor.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Prediction Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.meanAbsoluteError" href="#metrics.meanAbsoluteError"> tf.metrics.meanAbsoluteError</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L253-L255" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Loss or metric function: Mean absolute error.</p> <p>Mathematically, mean absolute error is defined as: <code>mean(abs(yPred - yTrue))</code>, wherein the <code>mean</code> is applied over feature dimensions.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [-<span class="hljs-number">2</span>, -<span class="hljs-number">3</span>]]); <span class="hljs-keyword">const</span> mse = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">meanAbsoluteError</span>(yTrue, yPred); mse.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Truth Tensor.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Prediction Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.meanAbsolutePercentageError" href="#metrics.meanAbsolutePercentageError"> tf.metrics.meanAbsolutePercentageError</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L275-L278" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Loss or metric function: Mean absolute percentage error.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title 
function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">10</span>, <span class="hljs-number">20</span>]]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">11</span>, <span class="hljs-number">24</span>]]); <span class="hljs-keyword">const</span> mse = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">meanAbsolutePercentageError</span>(yTrue, yPred); mse.<span class="hljs-title function_">print</span>(); </code></pre> <p>Aliases: <code>tf.metrics.MAPE</code>, <code>tf.metrics.mape</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Truth Tensor.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Prediction Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.meanSquaredError" href="#metrics.meanSquaredError"> tf.metrics.meanSquaredError</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L306-L308" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Loss or metric function: Mean squared error.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [-<span class="hljs-number">3</span>, -<span class="hljs-number">4</span>]]); <span class="hljs-keyword">const</span> mse = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">meanSquaredError</span>(yTrue, yPred); mse.<span class="hljs-title function_">print</span>(); </code></pre> <p>Aliases: <code>tf.metrics.MSE</code>, <code>tf.metrics.mse</code>.</p> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Truth Tensor.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Prediction Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.precision" href="#metrics.precision"> tf.metrics.precision</a> <span 
class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L165-L167" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the precision of the predictions with respect to the labels.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>( [ [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>] ] ); <span class="hljs-keyword">const</span> y = tf.<span class="hljs-title function_">tensor2d</span>( [ [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] ] ); <span class="hljs-keyword">const</span> precision = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">precision</span>(x, y); precision.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The ground truth values. Expected to contain only 0-1 values.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The predicted values. 
Expected to contain only 0-1 values.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.r2Score" href="#metrics.r2Score"> tf.metrics.r2Score</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L333-L335" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes R2 score.</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">3</span>, <span class="hljs-number">4</span>]]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [-<span class="hljs-number">3</span>, -<span class="hljs-number">4</span>]]); <span class="hljs-keyword">const</span> r2Score = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">r2Score</span>(yTrue, yPred); r2Score.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Truth Tensor.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Prediction Tensor.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.recall" href="#metrics.recall"> tf.metrics.recall</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L204-L206" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Computes the recall of the predictions with respect to the labels.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> x = tf.<span class="hljs-title function_">tensor2d</span>( [ [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>] ] ); <span class="hljs-keyword">const</span> y = tf.<span 
class="hljs-title function_">tensor2d</span>( [ [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>] ] ); <span class="hljs-keyword">const</span> recall = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">recall</span>(x, y); recall.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The ground truth values. Expected to contain only 0-1 values.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">The predicted values. Expected to contain only 0-1 values.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="metrics.sparseCategoricalAccuracy" href="#metrics.sparseCategoricalAccuracy"> tf.metrics.sparseCategoricalAccuracy</a> <span class="signature">(yTrue, yPred)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/exports_metrics.ts#L88-L91" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Sparse categorical accuracy metric function.</p> <p>Example:</p> <pre class="hljs"><code class="hljs language-js"> <span class="hljs-keyword">const</span> yTrue = tf.<span class="hljs-title function_">tensor1d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">0</span>]); <span class="hljs-keyword">const</span> yPred = tf.<span class="hljs-title function_">tensor2d</span>( [[<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0.4</span>, <span class="hljs-number">0.6</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">0.6</span>, <span class="hljs-number">0.4</span>], [<span class="hljs-number">0.7</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0</span>]]); <span class="hljs-keyword">const</span> crossentropy = tf.<span class="hljs-property">metrics</span>.<span class="hljs-title function_">sparseCategoricalAccuracy</span>(yTrue, yPred); crossentropy.<span class="hljs-title function_">print</span>(); </code></pre> </div> <div 
class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">yTrue</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">True labels: indices.</span> </li> <li class="parameter"> <span class="param-name">yPred</span> <span class="param-type">(<a href="#class:Tensor">tf.Tensor</a>)</span> <span class="param-docs">Predicted probabilities or logits.</span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type"><a href="#class:Tensor">tf.Tensor</a></span> </div> </div> </div> <div class="api-section"> <div class="heading"> <div class="title"> <a name="Callbacks" href="#Callbacks" class="symbol-link">Callbacks</a> </div> <div class="description"> </div> </div> <div class="subheading"> <div class="title"> <a name="Callbacks-" href="#Callbacks-" class="symbol-link"> Callbacks / </a> </div> <div class="description"> </div> </div> <div class="symbol function "> <div class="symbol-header"> <a class="symbol-link" name="callbacks.earlyStopping" href="#callbacks.earlyStopping"> tf.callbacks.earlyStopping</a> <span class="signature">(args?)</span> <span class="chip">function</span> <span class="source-link"> <a href="https://github.com/tensorflow/tfjs/tree/tfjs-v4.22.0/tfjs-layers/src/callbacks.ts#L251-L253" target=_blank>Source</a> </span> </div> <div class="documentation"><p>Factory function for a Callback that stops training when a monitored quantity has stopped improving.</p> <p>Early stopping is a type of regularization, and protects model against overfitting.</p> <p>The following example based on fake data illustrates how this callback can be used during <code>tf.LayersModel.fit()</code>:</p> <pre class="hljs"><code class="hljs language-js"><span class="hljs-keyword">const</span> model = tf.<span class="hljs-title function_">sequential</span>(); model.<span class="hljs-title function_">add</span>(tf.<span class="hljs-property">layers</span>.<span class="hljs-title function_">dense</span>({ <span class="hljs-attr">units</span>: <span class="hljs-number">3</span>, <span class="hljs-attr">activation</span>: <span class="hljs-string">&#x27;softmax&#x27;</span>, <span class="hljs-attr">kernelInitializer</span>: <span class="hljs-string">&#x27;ones&#x27;</span>, <span class="hljs-attr">inputShape</span>: [<span class="hljs-number">2</span>] })); <span class="hljs-keyword">const</span> xs = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">3</span>, <span class="hljs-number">4</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> ys = tf.<span class="hljs-title function_">tensor2d</span>([[<span class="hljs-number">1</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>]], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); <span class="hljs-keyword">const</span> xsVal = tf.<span class="hljs-title function_">tensor2d</span>([<span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">2</span>, <span class="hljs-number">2</span>]); <span class="hljs-keyword">const</span> ysVal = tf.<span class="hljs-title 
function_">tensor2d</span>([[<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">1</span>], [<span class="hljs-number">0</span>, <span class="hljs-number">1</span>, <span class="hljs-number">0</span>]], [<span class="hljs-number">2</span>, <span class="hljs-number">3</span>]); model.<span class="hljs-title function_">compile</span>( {<span class="hljs-attr">loss</span>: <span class="hljs-string">&#x27;categoricalCrossentropy&#x27;</span>, <span class="hljs-attr">optimizer</span>: <span class="hljs-string">&#x27;sgd&#x27;</span>, <span class="hljs-attr">metrics</span>: [<span class="hljs-string">&#x27;acc&#x27;</span>]}); <span class="hljs-comment">// Without the EarlyStopping callback, the val_acc value would be:</span> <span class="hljs-comment">// 0.5, 0.5, 0.5, 0.5, ...</span> <span class="hljs-comment">// With val_acc being monitored, training should stop after the 2nd epoch.</span> <span class="hljs-keyword">const</span> history = <span class="hljs-keyword">await</span> model.<span class="hljs-title function_">fit</span>(xs, ys, { <span class="hljs-attr">epochs</span>: <span class="hljs-number">10</span>, <span class="hljs-attr">validationData</span>: [xsVal, ysVal], <span class="hljs-attr">callbacks</span>: tf.<span class="hljs-property">callbacks</span>.<span class="hljs-title function_">earlyStopping</span>({<span class="hljs-attr">monitor</span>: <span class="hljs-string">&#x27;val_acc&#x27;</span>}) }); <span class="hljs-comment">// Expect to see a length-2 array.</span> <span class="hljs-variable language_">console</span>.<span class="hljs-title function_">log</span>(history.<span class="hljs-property">history</span>.<span class="hljs-property">val_acc</span>); </code></pre> </div> <div class="parameter-list "> <div class="heading">Parameters:</div> <ul> <li class="parameter"> <span class="param-name">args</span> <span class="param-type">(Object)</span> <span class="param-docs"></span> <span class="chip"> Optional </span> </li> <li class="parameter config-param"> <span class="param-name">monitor</span> <span class="param-type">(string)</span> <span class="param-docs">Quantity to be monitored.</p> <p>Defaults to 'val_loss'.</span> </li> <li class="parameter config-param"> <span class="param-name">minDelta</span> <span class="param-type">(number)</span> <span class="param-docs">Minimum change in the monitored quantity to qualify as improvement, i.e., an absolute change of less than <code>minDelta</code> will count as no improvement.</p> <p>Defaults to 0.</span> </li> <li class="parameter config-param"> <span class="param-name">patience</span> <span class="param-type">(number)</span> <span class="param-docs">Number of epochs with no improvement after which training will be stopped.</p> <p>Defaults to 0.</span> </li> <li class="parameter config-param"> <span class="param-name">verbose</span> <span class="param-type">(number)</span> <span class="param-docs">Verbosity mode.</span> </li> <li class="parameter config-param"> <span class="param-name">mode</span> <span class="param-type">('auto'|'min'|'max')</span> <span class="param-docs">Mode: one of 'min', 'max', and 'auto'.</p> <ul> <li>In 'min' mode, training will be stopped when the quantity monitored has stopped decreasing.</li> <li>In 'max' mode, training will be stopped when the quantity monitored has stopped increasing.</li> <li>In 'auto' mode, the direction is inferred automatically from the name of the monitored quantity.</li> </ul> <p>Defaults to 'auto'.</span> </li> <li 
class="parameter config-param"> <span class="param-name">baseline</span> <span class="param-type">(number)</span> <span class="param-docs">Baseline value of the monitored quantity.</p> <p>If specified, training will be stopped if the model doesn't show improvement over the baseline.</span> </li> <li class="parameter config-param"> <span class="param-name">restoreBestWeights</span> <span class="param-type">(boolean)</span> <span class="param-docs">Whether to restore model weights from the epoch with the best value of the monitored quantity. If <code>False</code>, the model weights obtained at the last step of training are used.</p> <p><strong><code>True</code> is not supported yet.</strong></span> </li> </ul> </div> <div class="returns"> <span class="returns-header">Returns:</span> <span class="return-type">EarlyStopping</span> </div> </div> </div> </div> </div> </div> </body> <script> (function (i, s, o, g, r, a, m) { i['GoogleAnalyticsObject'] = r; i[r] = i[r] || function () { (i[r].q = i[r].q || []).push(arguments) }, i[r].l = 1 * new Date(); a = s.createElement(o), m = s.getElementsByTagName(o)[0]; a.async = 1; a.src = g; m.parentNode.insertBefore(a, m) })(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga'); ga('create', 'UA-46457317-10', 'auto'); ga('send', 'pageview'); var trackOutboundLink = function (link) { var url = link.href; ga('send', 'event', 'outbound', 'click', url, { 'transport': 'beacon', 'hitCallback': function () { if (link.target.match('_blank')) { window.open(url); } else { document.location = url; } } }); } // Add outbound link tracker to external links var links = Array.prototype.slice.call(document.querySelectorAll('a')); var internal = /(js\.tensorflow\.org)|localhost/; for (var i = 0; i < links.length; i++) { (function () { var link = links[i]; if (link.href.match(internal)) { } else { link.addEventListener('click', (event) => { event.preventDefault(); trackOutboundLink(link); return false; }); } })(); } </script> </html>
