diff --git a/demos/homepage/Gemfile b/demos/homepage/Gemfile
deleted file mode 100644
index 8b53357a7e..0000000000
--- a/demos/homepage/Gemfile
+++ /dev/null
@@ -1,26 +0,0 @@
-source "https://rubygems.org"
-
-# Hello! This is where you manage which Jekyll version is used to run.
-# When you want to use a different version, change it below, save the
-# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
-#
-# bundle exec jekyll serve
-#
-# This will help ensure the proper Jekyll version is running.
-# Happy Jekylling!
-
-# This is the default theme for new Jekyll sites. You may change this to anything you like.
-gem "minima", "~> 2.0"
-
-# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
-# uncomment the line below. To upgrade, run `bundle update github-pages`.
-gem "github-pages", group: :jekyll_plugins
-
-# If you have any plugins, put them here!
-group :jekyll_plugins do
- gem "jekyll-feed", "~> 0.6"
-end
-
-# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
-#gem 'tzinfo-data', platforms: [:mingw, :mswin, :x64_mingw, :jruby]
-
diff --git a/demos/homepage/Gemfile.lock b/demos/homepage/Gemfile.lock
deleted file mode 100644
index 147c3924b8..0000000000
--- a/demos/homepage/Gemfile.lock
+++ /dev/null
@@ -1,205 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- activesupport (4.2.8)
- i18n (~> 0.7)
- minitest (~> 5.1)
- thread_safe (~> 0.3, >= 0.3.4)
- tzinfo (~> 1.1)
- addressable (2.5.1)
- public_suffix (~> 2.0, >= 2.0.2)
- coffee-script (2.4.1)
- coffee-script-source
- execjs
- coffee-script-source (1.12.2)
- colorator (1.1.0)
- ethon (0.10.1)
- ffi (>= 1.3.0)
- execjs (2.7.0)
- faraday (0.12.2)
- multipart-post (>= 1.2, < 3)
- ffi (1.9.18)
- forwardable-extended (2.6.0)
- gemoji (3.0.0)
- github-pages (146)
- activesupport (= 4.2.8)
- github-pages-health-check (= 1.3.5)
- jekyll (= 3.4.5)
- jekyll-avatar (= 0.4.2)
- jekyll-coffeescript (= 1.0.1)
- jekyll-default-layout (= 0.1.4)
- jekyll-feed (= 0.9.2)
- jekyll-gist (= 1.4.0)
- jekyll-github-metadata (= 2.5.1)
- jekyll-mentions (= 1.2.0)
- jekyll-optional-front-matter (= 0.2.0)
- jekyll-paginate (= 1.1.0)
- jekyll-readme-index (= 0.1.0)
- jekyll-redirect-from (= 0.12.1)
- jekyll-relative-links (= 0.4.1)
- jekyll-sass-converter (= 1.5.0)
- jekyll-seo-tag (= 2.2.3)
- jekyll-sitemap (= 1.0.0)
- jekyll-swiss (= 0.4.0)
- jekyll-theme-architect (= 0.0.4)
- jekyll-theme-cayman (= 0.0.4)
- jekyll-theme-dinky (= 0.0.4)
- jekyll-theme-hacker (= 0.0.4)
- jekyll-theme-leap-day (= 0.0.4)
- jekyll-theme-merlot (= 0.0.4)
- jekyll-theme-midnight (= 0.0.4)
- jekyll-theme-minimal (= 0.0.4)
- jekyll-theme-modernist (= 0.0.4)
- jekyll-theme-primer (= 0.3.1)
- jekyll-theme-slate (= 0.0.4)
- jekyll-theme-tactile (= 0.0.4)
- jekyll-theme-time-machine (= 0.0.4)
- jekyll-titles-from-headings (= 0.2.0)
- jemoji (= 0.8.0)
- kramdown (= 1.13.2)
- liquid (= 3.0.6)
- listen (= 3.0.6)
- mercenary (~> 0.3)
- minima (= 2.1.1)
- rouge (= 1.11.1)
- terminal-table (~> 1.4)
- github-pages-health-check (1.3.5)
- addressable (~> 2.3)
- net-dns (~> 0.8)
- octokit (~> 4.0)
- public_suffix (~> 2.0)
- typhoeus (~> 0.7)
- html-pipeline (2.6.0)
- activesupport (>= 2)
- nokogiri (>= 1.4)
- i18n (0.8.6)
- jekyll (3.4.5)
- addressable (~> 2.4)
- colorator (~> 1.0)
- jekyll-sass-converter (~> 1.0)
- jekyll-watch (~> 1.1)
- kramdown (~> 1.3)
- liquid (~> 3.0)
- mercenary (~> 0.3.3)
- pathutil (~> 0.9)
- rouge (~> 1.7)
- safe_yaml (~> 1.0)
- jekyll-avatar (0.4.2)
- jekyll (~> 3.0)
- jekyll-coffeescript (1.0.1)
- coffee-script (~> 2.2)
- jekyll-default-layout (0.1.4)
- jekyll (~> 3.0)
- jekyll-feed (0.9.2)
- jekyll (~> 3.3)
- jekyll-gist (1.4.0)
- octokit (~> 4.2)
- jekyll-github-metadata (2.5.1)
- jekyll (~> 3.1)
- octokit (~> 4.0, != 4.4.0)
- jekyll-mentions (1.2.0)
- activesupport (~> 4.0)
- html-pipeline (~> 2.3)
- jekyll (~> 3.0)
- jekyll-optional-front-matter (0.2.0)
- jekyll (~> 3.0)
- jekyll-paginate (1.1.0)
- jekyll-readme-index (0.1.0)
- jekyll (~> 3.0)
- jekyll-redirect-from (0.12.1)
- jekyll (~> 3.3)
- jekyll-relative-links (0.4.1)
- jekyll (~> 3.3)
- jekyll-sass-converter (1.5.0)
- sass (~> 3.4)
- jekyll-seo-tag (2.2.3)
- jekyll (~> 3.3)
- jekyll-sitemap (1.0.0)
- jekyll (~> 3.3)
- jekyll-swiss (0.4.0)
- jekyll-theme-architect (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-cayman (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-dinky (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-hacker (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-leap-day (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-merlot (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-midnight (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-minimal (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-modernist (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-primer (0.3.1)
- jekyll (~> 3.3)
- jekyll-theme-slate (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-tactile (0.0.4)
- jekyll (~> 3.3)
- jekyll-theme-time-machine (0.0.4)
- jekyll (~> 3.3)
- jekyll-titles-from-headings (0.2.0)
- jekyll (~> 3.3)
- jekyll-watch (1.5.0)
- listen (~> 3.0, < 3.1)
- jemoji (0.8.0)
- activesupport (~> 4.0)
- gemoji (~> 3.0)
- html-pipeline (~> 2.2)
- jekyll (>= 3.0)
- kramdown (1.13.2)
- liquid (3.0.6)
- listen (3.0.6)
- rb-fsevent (>= 0.9.3)
- rb-inotify (>= 0.9.7)
- mercenary (0.3.6)
- mini_portile2 (2.2.0)
- minima (2.1.1)
- jekyll (~> 3.3)
- minitest (5.10.3)
- multipart-post (2.0.0)
- net-dns (0.8.0)
- nokogiri (1.8.0)
- mini_portile2 (~> 2.2.0)
- octokit (4.7.0)
- sawyer (~> 0.8.0, >= 0.5.3)
- pathutil (0.14.0)
- forwardable-extended (~> 2.6)
- public_suffix (2.0.5)
- rb-fsevent (0.10.2)
- rb-inotify (0.9.10)
- ffi (>= 0.5.0, < 2)
- rouge (1.11.1)
- safe_yaml (1.0.4)
- sass (3.5.1)
- sass-listen (~> 4.0.0)
- sass-listen (4.0.0)
- rb-fsevent (~> 0.9, >= 0.9.4)
- rb-inotify (~> 0.9, >= 0.9.7)
- sawyer (0.8.1)
- addressable (>= 2.3.5, < 2.6)
- faraday (~> 0.8, < 1.0)
- terminal-table (1.8.0)
- unicode-display_width (~> 1.1, >= 1.1.1)
- thread_safe (0.3.6)
- typhoeus (0.8.0)
- ethon (>= 0.8.0)
- tzinfo (1.2.3)
- thread_safe (~> 0.1)
- unicode-display_width (1.3.0)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- github-pages
- jekyll-feed (~> 0.6)
- minima (~> 2.0)
-
-BUNDLED WITH
- 1.15.3
diff --git a/demos/homepage/_config.yml b/demos/homepage/_config.yml
deleted file mode 100644
index cb0c8718ed..0000000000
--- a/demos/homepage/_config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Welcome to Jekyll!
-#
-# This config file is meant for settings that affect your whole blog, values
-# which you are expected to set up once and rarely edit after that. If you find
-# yourself editing this file very often, consider using Jekyll's data files
-# feature for the data you need to update frequently.
-#
-# For technical reasons, this file is *NOT* reloaded automatically when you use
-# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
-
-# Site settings
-# These are used to personalize your new site. If you look in the HTML files,
-# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
-# You can create any custom variable you would like, and they will be accessible
-# in the templates via {{ site.myvariable }}.
-title: deeplearn.js
-baseurl: "" # the subpath of your site, e.g. /blog
-
-# Build settings
-markdown: kramdown
-theme: minima
-gems:
- - jekyll-feed
- - jekyll-redirect-from
-exclude:
- - Gemfile
- - Gemfile.lock
- - node_modules/
- - demos/node_modules/
- - demos/bower_components/
-
-# deployment
-host: 0.0.0.0
diff --git a/demos/homepage/_includes/footer.html b/demos/homepage/_includes/footer.html
deleted file mode 100644
index eca84845e6..0000000000
--- a/demos/homepage/_includes/footer.html
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
-
diff --git a/demos/homepage/_includes/header.html b/demos/homepage/_includes/header.html
deleted file mode 100644
index 852ea76eb2..0000000000
--- a/demos/homepage/_includes/header.html
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
-
-
diff --git a/demos/homepage/_layouts/default.html b/demos/homepage/_layouts/default.html
deleted file mode 100644
index 895f99b077..0000000000
--- a/demos/homepage/_layouts/default.html
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-
-
-
- deeplearn.js
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {% include header.html %}
-
- {{ content }}
- {% include footer.html %}
-
-
-
-
diff --git a/demos/homepage/_layouts/page.html b/demos/homepage/_layouts/page.html
deleted file mode 100644
index 8f69ea1b80..0000000000
--- a/demos/homepage/_layouts/page.html
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: default
----
-
-
-
-
-
- {% assign pages_list = site.pages | sort: 'order' %}
-
-
- {% for my_page in pages_list %}
- {% if my_page.title and my_page.title != '_' %}
- {{ my_page.title | escape }}
- {% endif %}
- {% endfor %}
-
-
-
-
diff --git a/demos/homepage/assets/style.css b/demos/homepage/assets/style.css
deleted file mode 100644
index 20200fc79f..0000000000
--- a/demos/homepage/assets/style.css
+++ /dev/null
@@ -1,325 +0,0 @@
-html {
- width: 100%;
- height: 100%;
-}
-body {
- font-family: "Roboto", "Helvetica", "Arial", sans-serif;
- margin: 0;
- width: 100%;
- height: 100%;
-}
-head {
- height: 0;
- width: 0;
-}
-/* Typography */
-p {
- font-family: 'Roboto', sans-serif;
- line-height: 1.67em;
- font-size: 16px;
-}
-.mdl-list__item a {
- text-decoration: none;
- color: rgba(0,0,0,0.64);
- font-weight: 400;
- letter-spacing: 0;
-}
-.mdl-list__item a:hover {
- text-decoration: underline;
- color: rgba(0,0,0,0.64);
- font-weight: 400;
- letter-spacing: 0;
-}
-.mdl-navigation__link{
- font-family: 'Roboto', sans-serif;
-}
-.mdl-layout__drawer {
- font-family: 'Roboto', sans-serif;
-}
-
-/* CPPN Demo */
-.banner {
- background-size: cover;
- background-position: center;
- min-height: 250px;
- padding-top: 64px;
- padding-bottom: 32px;
- position: relative;
-}
-.banner-cover {
- position: relative;
- background: -webkit-linear-gradient(#eeede2, #a3d5bc, #6ab8b5, #42737f);
- background: -moz-linear-gradient(#eeede2, #a3d5bc, #6ab8b5, #42737f);
- background: linear-gradient(#eeede2, #a3d5bc, #6ab8b5, #42737f);
-}
-.banner-text {
- color:#fff;
-}
-.cppn-controls {
- min-width: 300px;
- max-width: 300px;
- padding:24px 24px 60px 24px;
- background-color: rgba(255,255,255,0.99);
- position: relative;
-}
-.cppn-demo {
- color:#fff;
-}
-#disabled-demo-overlay {
- position: absolute;
- width: 100%;
- height: 100%;
- background-color: rgba(62, 82, 90, .9);
- top: 0;
- left: 0;
-}
-#disabled-demo {
- margin: auto;
- position: absolute;
- top: 20%;
- left: 0;
- right: 0;
- padding: 32px;
- bottom: 0;
- font-size: 18px;
- color: white;
- font-weight: 400;
- text-align: left;
- line-height: 1.3325em;
- z-index: 2;
-}
-#inference {
- width: 100%;
- height: calc(100% + 1px);
- position: absolute;
-}
-.getmdl-select .mdl-icon-toggle__label{
- float:right;
- margin-top:-30px;
- color:rgba(0,0,0,0.4);
-}
-/* Introduction */
-.intro-text {
- margin: 32px auto 32px auto;
-}
-.mdl-grid {
- max-width: 1200px;
-}
-.mdl-mini-footer {
- margin-top: 72px;
-}
-.mdl-card__actions {
- padding-left: 12px;
-}
-.intro-headline {
- line-height: 1.67em;
- font-weight: 300;
- color: #888;
-}
-/* Responsive behavior */
-.intro-text > .mdl-cell--8-col-tablet {
- margin-top: 72px;
-}
-.banner >.mdl-cell--8-col-tablet {
- margin: 36px;
-}
-.resource-tabs {
- font-family: 'Roboto', sans-serif;
-}
-
-a {
- color: #346f91;
- font-size: 16px;
-}
-a:visited {
- color: #346f91;
-}
-a:hover {
- color: #346f91;
-}
-.mdl-card__actions a:hover {
- text-decoration: none;
-}
-
-a.main-title {
- color: white;
- text-decoration: none;
-}
-
-blockquote {
- color: black;
- padding: 8px 8px 8px 20px;
- border-left-width: 2px;
- font-style: normal;
- border-left-style: solid;
- border-color: #9e9e9e;
- background-color: #eee;
- margin-bottom: 32px;
-}
-blockquote:before {
- content: none;
-}
-blockquote:after {
- content: none;
-}
-ul {
- margin-left: 16px;
- list-style-type: disc;
-}
-ul ul {
- list-style-type: disc;
-}
-ul ul ul {
- list-style-type: disc;
-}
-ul ul ul ul {
- list-style-type: disc;
-}
-.highlight .err {
- color: inherit;
- background-color: inherit;
-}
-table {
- border-spacing: 20px;
-}
-.site-nav {
- position: fixed;
- float: inherit;
-}
-.site-nav ul {
- margin-top: 32px;
- border-left: solid 3px #C0EbF1;
- padding-left: 20px;
- line-height: 28px;
- list-style-type: none;
- line-height: 1.3em;
- font-weight: normal;
-}
-.site-nav ul li {
- padding-bottom: 20px;
-}
-.site-nav ul a {
- color: #50797f;
- font-weight: normal;
-}
-ul.index {
- border-left: 2px solid #346f91;
- margin-left: 16px;
- margin-top: 16px;
- padding-left: 16px;
- list-style-type: none;
-}
-.deeplearn-shine {
- color: #777;
- font-weight: 500;
-}
-
-.featured-demo div.mdl-card__title {
- height: 352px;
- color: white;
- z-index: 1;
- padding: 5px 12px;
- font-weight: 400;
- font-size: 48px;
-}
-
-.featured-demo h1.mdl-card__title-text {
- font-size: 36px;
- margin: 0 0 2px 2px;
- padding: 5px 12px;
-}
-
-.featured-demo div.mdl-card__title:before {
- background: none;
-}
-
-.demo-card .mdl-card__title {
- color: #fff;
- height: 176px;
-}
-
-.demo-card .mdl-card__title:before {
- content: '';
- height: 176px;
- position: absolute;
- top: 0;
- right: 0;
- bottom: 0;
- left: 0;
- background: rgba(41, 73, 77, 0.73);
-}
-
-div.mdl-card__title h1.mdl-card__title-text {
- font-weight: 200;
- font-size: 0.6em;
- letter-spacing: 0.5px;
- background-color: rgba(0, 0, 0, .54);
-}
-
-#teachable-machine {
- background: url('../demos/images/teachablemachine_preview.jpg') center / cover;
-}
-
-#perf-rnn {
- background: url('../demos/images/performance_rnn_preview.jpg') center / cover;
-}
-
-#model-builder {
- background: url('../demos/images/model-builder.png') center / cover;
-}
-
-#webcam {
- background: url('../demos/images/imagenet.png') center / cover;
-}
-
-#nnart {
- background: url('../demos/images/nn-art.png') center / cover;
-}
-
-#benchmarks {
- background: url('../demos/images/benchmark.png') center / cover;
-}
-
-#playground {
- background: url('../demos/images/playground_preview.png') center / cover;
-}
-
-.demo-card .mdl-card__title-text {
- color: white;
- z-index: 1;
- font-weight: 400;
-}
-
-.mdl-typography--display-2, h1, h2, h3, h4 {
- color: #414141;
- font-weight: 300;
-}
-code {
- background: none;
-}
-.highlighter-rouge .highlight {
- background-color: #f5f5f5;
- padding-bottom: 16px;
- margin-bottom: 32px;
-}
-
-h2 {
- font-size: 40px;
-}
-
-h3 {
- font-size: 30px;
-}
-p.intro-body {
- font-weight: 300;
-}
-.mdl-mini-footer__link-list a {
- color: rgb(158, 158, 158);
-}
-.mdl-mini-footer__link-list a:visited {
- color: rgb(158, 158, 158);
-}
-
-#travis-badge {
- display: none;
-}
diff --git a/demos/homepage/assets/support.js b/demos/homepage/assets/support.js
deleted file mode 100644
index 77b9334efb..0000000000
--- a/demos/homepage/assets/support.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * @license
- * Copyright 2017 Google Inc. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
-function isWebGLEnabled() {
- var canvas = document.createElement('canvas');
-
- var attributes = {
- alpha: false,
- antialias: false,
- premultipliedAlpha: false,
- preserveDrawingBuffer: false,
- depth: false,
- stencil: false,
- failIfMajorPerformanceCaveat: true
- };
- return null != (canvas.getContext('webgl', attributes) ||
- canvas.getContext('experimental-webgl', attributes));
-}
-
-function buildAndShowDialog(title, content) {
- var dialogContainer = document.createElement('div');
- dialogContainer.innerHTML = `
-
- ${title}
-
-
- `;
- document.body.appendChild(dialogContainer);
- var dialog = document.getElementById('dialog');
- dialog.style.width = '430px';
- dialogPolyfill.registerDialog(dialog);
- dialog.showModal();
-}
-
-function inializePolymerPage() {
- document.addEventListener('WebComponentsReady', function(event) {
- if (!isWebGLEnabled()) {
- const title = `Check if hardware acceleration is enabled.`;
- const content = `
- Looks like your device is supported but settings aren't in place.
- Please check if WebGL is enabled for your browser.
-
- See: How can I enable WebGL in my browser?
- `;
- buildAndShowDialog(title, content);
- } else {
- var bundleScript = document.createElement('script');
- bundleScript.src = 'bundle.js';
- document.head.appendChild(bundleScript);
- }
- });
-}
-inializePolymerPage();
diff --git a/demos/homepage/homepage.ts b/demos/homepage/homepage.ts
deleted file mode 100644
index 60971f821d..0000000000
--- a/demos/homepage/homepage.ts
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * @license
- * Copyright 2017 Google Inc. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
-
-import {ENV} from 'deeplearn';
-import {ActivationFunction, CPPN} from '../nn-art/cppn';
-
-const inferenceCanvas =
- document.querySelector('#inference') as HTMLCanvasElement;
-
-if (ENV.get('WEBGL_VERSION') >= 1) {
- startCPPN();
-} else {
- document.getElementById('disabled-demo-overlay').style.display = '';
- inferenceCanvas.style.display = 'none';
-}
-
-function startCPPN() {
- const DEFAULT_Z_SCALE = 1;
- const NUM_NEURONS = 30;
- const DEFAULT_NUM_LAYERS = 2;
- const WEIGHTS_STDEV = 0.6;
-
- const cppn = new CPPN(inferenceCanvas);
-
- cppn.setActivationFunction('tanh');
- cppn.setNumLayers(DEFAULT_NUM_LAYERS);
- cppn.setZ1Scale(convertZScale(DEFAULT_Z_SCALE));
- cppn.setZ2Scale(convertZScale(DEFAULT_Z_SCALE));
- cppn.generateWeights(NUM_NEURONS, WEIGHTS_STDEV);
- cppn.start();
-
- const currentActivationFnElement =
- document.querySelector('#activation-fn') as HTMLInputElement;
- document.querySelector('#activation-selector')
- .addEventListener(
- // tslint:disable-next-line:no-any
- 'click', (event: any) => {
- const activationFn =
- (event.target as HTMLElement).getAttribute('data-val') as
- ActivationFunction;
- currentActivationFnElement.value = activationFn;
- cppn.setActivationFunction(activationFn);
- });
-
- const layersSlider =
- document.querySelector('#layers-slider') as HTMLInputElement;
- const layersCountElement =
- document.querySelector('#layers-count') as HTMLDivElement;
- layersSlider.addEventListener('input', (event) => {
- // tslint:disable-next-line:no-any
- const numLayers = parseInt((event as any).target.value, 10);
- layersCountElement.innerText = numLayers.toString();
- cppn.setNumLayers(numLayers);
- });
- layersCountElement.innerText = DEFAULT_NUM_LAYERS.toString();
-
- const z1Slider = document.querySelector('#z1-slider') as HTMLInputElement;
- z1Slider.addEventListener('input', (event) => {
- // tslint:disable-next-line:no-any
- const z1Scale = parseInt((event as any).target.value, 10);
- cppn.setZ1Scale(convertZScale(z1Scale));
- });
-
- const z2Slider = document.querySelector('#z2-slider') as HTMLInputElement;
- z2Slider.addEventListener('input', (event) => {
- // tslint:disable-next-line:no-any
- const z2Scale = parseInt((event as any).target.value, 10);
- cppn.setZ2Scale(convertZScale(z2Scale));
- });
-
- const randomizeButton =
- document.querySelector('#random') as HTMLButtonElement;
- randomizeButton.addEventListener('click', () => {
- cppn.generateWeights(NUM_NEURONS, WEIGHTS_STDEV);
- if (!playing) {
- cppn.start();
- requestAnimationFrame(() => {
- cppn.stopInferenceLoop();
- });
- }
- });
-
- let playing = true;
- const toggleButton = document.querySelector('#toggle') as HTMLButtonElement;
- toggleButton.addEventListener('click', () => {
- playing = !playing;
- if (playing) {
- toggleButton.innerHTML = 'STOP';
- cppn.start();
- } else {
- toggleButton.innerHTML = 'START';
- cppn.stopInferenceLoop();
- }
- });
-
- let canvasOnScreenLast = true;
- let scrollEventScheduled = false;
- const mainElement = document.querySelector('main') as HTMLElement;
- mainElement.addEventListener('scroll', () => {
- if (!scrollEventScheduled) {
- window.requestAnimationFrame(() => {
- const canvasOnScreen = isCanvasOnScreen();
- if (canvasOnScreen !== canvasOnScreenLast) {
- if (canvasOnScreen) {
- if (playing) {
- cppn.start();
- }
- } else {
- cppn.stopInferenceLoop();
- }
- canvasOnScreenLast = canvasOnScreen;
- }
- scrollEventScheduled = false;
- });
- }
- scrollEventScheduled = true;
- });
-
- function isCanvasOnScreen() {
- return mainElement.scrollTop < inferenceCanvas.offsetHeight;
- }
-
- function convertZScale(z: number): number {
- return (103 - z);
- }
-}
diff --git a/demos/homepage/index.md b/demos/homepage/index.md
deleted file mode 100644
index e78b532d85..0000000000
--- a/demos/homepage/index.md
+++ /dev/null
@@ -1,306 +0,0 @@
----
-layout: default
----
-
-
-
-
-
-
-
-
deeplearn.js
-
- a hardware-accelerated
- machine intelligence
- library for the web
-
-
-
-
-
-
-
- keyboard_arrow_down
-
- Activation function
-
-
-
-
-
-
-
-
z1 time
-
-
-
-
z2 time
-
-
-
-
randomize
-
stop
-
-
What is a CPPN?
-
-
- Your device does not support WebGL, so we cannot show this demo. Please enable hardware acceleration or come back on a device that supports WebGL.
-
-
-
-
-
-
-
-
-
-
-
deeplearn.js is an open-source library that brings performant machine learning building blocks to the web, allowing you to train neural networks in a browser or run pre-trained models in inference mode.
-
-
-
-
We provide an API that closely mirrors
- the TensorFlow eager API.deeplearn.js
- was originally developed by the Google Brain PAIR team to build powerful
- interactive machine learning tools for the browser. You can use the library
- for everything from education, to model understanding, to art projects.
-
-
-
-
-
-
-
-
-
-
- {% capture my_include %}{% include README.md %}{% endcapture %}
- {{ my_include | markdownify }}
-
-
-
-
-
-
-
-
Acknowledgements
-
-
-
-
-
-
-
-
-
-
- We would like to acknowledge Chi Zeng, David Farhi, Mahima Pushkarna,
- Lauren Hannah-Murphy, Minsuk (Brian) Kahng, James Wexler, Martin Wattenberg,
- Fernanda ViƩgas, Greg Corrado, Jeff Dean for their tremendous help, and the
- Google Brain team for providing support for the project.
-
-
-
-
-
diff --git a/demos/homepage/preview.png b/demos/homepage/preview.png
deleted file mode 100644
index 6c8d5610b1..0000000000
Binary files a/demos/homepage/preview.png and /dev/null differ
diff --git a/package.json b/package.json
index ab6bb36a34..db9c270187 100644
--- a/package.json
+++ b/package.json
@@ -1,11 +1,11 @@
{
- "name": "deeplearn",
+ "name": "@tensorflow/tfjs-core",
"version": "0.6.0-alpha7",
"description": "Hardware-accelerated JavaScript library for machine intelligence",
"private": false,
"main": "dist/index.js",
- "jsdelivr": "dist/deeplearn.min.js",
- "unpkg": "dist/deeplearn.min.js",
+ "jsdelivr": "dist/tf-core.min.js",
+ "unpkg": "dist/tf-core.min.js",
"types": "dist/index.d.ts",
"repository": {
"type": "git",
diff --git a/scripts/build-npm.sh b/scripts/build-npm.sh
index 807c39ac9b..4a93f7428f 100755
--- a/scripts/build-npm.sh
+++ b/scripts/build-npm.sh
@@ -19,7 +19,7 @@ set -e
rimraf dist/
yarn
tsc --sourceMap false
-browserify --standalone dl src/index.ts -p [tsify] > dist/deeplearn.js
-uglifyjs dist/deeplearn.js -c -m -o dist/deeplearn.min.js
-echo "Stored standalone library at dist/deeplearn(.min).js"
+browserify --standalone tfc src/index.ts -p [tsify] > dist/tf-core.js
+uglifyjs dist/tf-core.js -c -m -o dist/tf-core.min.js
+echo "Stored standalone library at dist/tf-core(.min).js"
npm pack
diff --git a/src/browser_util.ts b/src/browser_util.ts
index b84058cbbc..26e5c9fbc3 100644
--- a/src/browser_util.ts
+++ b/src/browser_util.ts
@@ -21,7 +21,7 @@ export class BrowserUtil {
* Returns a promise that resolve when a requestAnimationFrame has completed.
*
* This is simply a sugar method so that users can do the following:
- * `await dl.nextFrame();`
+ * `await tf.nextFrame();`
*/
@doc({heading: 'Performance', subheading: 'Timing'})
static nextFrame(): Promise {
diff --git a/src/environment.ts b/src/environment.ts
index e6301624d5..b96c53b774 100644
--- a/src/environment.ts
+++ b/src/environment.ts
@@ -222,7 +222,7 @@ export class Environment {
*
* @param backendType The backend type. Currently supports `'webgl'|'cpu'`.
* @param safeMode Defaults to false. In safe mode, you are forced to
- * construct tensors and call math operations inside a `dl.tidy()` which
+ * construct tensors and call math operations inside a `tidy()` which
* will automatically clean up intermediate tensors.
*/
@doc({heading: 'Environment'})
@@ -256,7 +256,7 @@ export class Environment {
* - `unreliable`: `Optional` `boolean`:
* - On WebGL, not present (always reliable).
* - On CPU, true. Due to automatic garbage collection, these numbers
- * represent undisposed tensors, i.e. not wrapped in `dl.tidy()`, or
+ * represent undisposed tensors, i.e. not wrapped in `tidy()`, or
* lacking a call to `tensor.dispose()`.
*/
@doc({heading: 'Performance', subheading: 'Memory'})
diff --git a/src/gradients.ts b/src/gradients.ts
index 847d779245..e26ee83601 100644
--- a/src/gradients.ts
+++ b/src/gradients.ts
@@ -53,21 +53,21 @@ export class Gradients {
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
- * const g = dl.grad(f);
+ * const g = tf.grad(f);
*
- * const x = dl.tensor1d([2, 3]);
+ * const x = tf.tensor1d([2, 3]);
* g(x).print();
* ```
*
* ```js
* // f(x) = x ^ 3
- * const f = x => x.pow(dl.scalar(3, 'int32'));
+ * const f = x => x.pow(tf.scalar(3, 'int32'));
* // f'(x) = 3x ^ 2
- * const g = dl.grad(f);
+ * const g = tf.grad(f);
* // f''(x) = 6x
- * const gg = dl.grad(g);
+ * const gg = tf.grad(g);
*
- * const x = dl.tensor1d([2, 3]);
+ * const x = tf.tensor1d([2, 3]);
* gg(x).print();
* ```
*
@@ -111,10 +111,10 @@ export class Gradients {
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df / da = b, df / db = a
- * const g = dl.grads(f);
+ * const g = tf.grads(f);
*
- * const a = dl.tensor1d([2, 3]);
- * const b = dl.tensor1d([-2, -3]);
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
* const [da, db] = g([a, b]);
* console.log('da');
* da.print();
@@ -150,7 +150,7 @@ export class Gradients {
}
/**
- * Like `dl.grad`, but also returns the value of `f()`. Useful when `f()`
+ * Like `grad`, but also returns the value of `f()`. Useful when `f()`
* returns a metric you want to show.
*
* The result is a rich object with the following properties:
@@ -161,9 +161,9 @@ export class Gradients {
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
- * const g = dl.valueAndGrad(f);
+ * const g = tf.valueAndGrad(f);
*
- * const x = dl.tensor1d([2, 3]);
+ * const x = tf.tensor1d([2, 3]);
* const {value, grad} = g(x);
*
* console.log('value');
@@ -206,10 +206,10 @@ export class Gradients {
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df/da = b, df/db = a
- * const g = dl.valueAndGrads(f);
+ * const g = tf.valueAndGrads(f);
*
- * const a = dl.tensor1d([2, 3]);
- * const b = dl.tensor1d([-2, -3]);
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
* const {value, grads} = g([a, b]);
*
* const [da, db] = grads;
@@ -257,14 +257,14 @@ export class Gradients {
* defaults to all trainable variables.
*
* ```js
- * const a = dl.variable(dl.tensor1d([3, 4]));
- * const b = dl.variable(dl.tensor1d([5, 6]));
- * const x = dl.tensor1d([1, 2]);
+ * const a = tf.variable(tf.tensor1d([3, 4]));
+ * const b = tf.variable(tf.tensor1d([5, 6]));
+ * const x = tf.tensor1d([1, 2]);
*
* // f(a, b) = a * x ^ 2 + b * x
* const f = () => a.mul(x.square()).add(b.mul(x)).sum();
* // df/da = x ^ 2, df/db = x
- * const {value, grads} = dl.variableGrads(f);
+ * const {value, grads} = tf.variableGrads(f);
*
* Object.keys(grads).forEach(varName => grads[varName].print());
* ```
@@ -332,13 +332,13 @@ export class Gradients {
* respect to each input of `f` are computed using `f().gradFunc`.
*
* ```js
- * const customOp = dl.customGrad(x => {
+ * const customOp = tf.customGrad(x => {
* // Override gradient of our custom x ^ 2 op to be dy * abs(x);
* return {value: x.square(), gradFunc: dy => [dy.mul(x.abs())]};
* });
*
- * const x = dl.tensor1d([-1, -2, 3]);
- * const dx = dl.grad(x => customOp(x));
+ * const x = tf.tensor1d([-1, -2, 3]);
+ * const dx = tf.grad(x => customOp(x));
*
* console.log(`f(x):`);
* customOp(x).print();
diff --git a/src/ops/array_ops.ts b/src/ops/array_ops.ts
index a6a37723df..6ce0130063 100644
--- a/src/ops/array_ops.ts
+++ b/src/ops/array_ops.ts
@@ -36,18 +36,18 @@ export class ArrayOps {
*
* ```js
* // Pass an array of values to create a vector.
- * dl.tensor([1, 2, 3, 4]).print();
+ * tf.tensor([1, 2, 3, 4]).print();
* ```
*
* ```js
* // Pass a nested array of values to make a matrix or a higher
* // dimensional tensor.
- * dl.tensor([[1, 2], [3, 4]]).print();
+ * tf.tensor([[1, 2], [3, 4]]).print();
* ```
*
* ```js
* // Pass a flat array and specify a shape yourself.
- * dl.tensor([1, 2, 3, 4], [2, 2]).print();
+ * tf.tensor([1, 2, 3, 4], [2, 2]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
@@ -80,12 +80,11 @@ export class ArrayOps {
/**
* Creates rank-0 `Tensor` (scalar) with the provided value and dtype.
*
- * This method is mainly for self documentation and TypeScript typings as the
- * same functionality can be achieved with `tensor`. In general, we recommend
- * using this method as it makes code more readable.
+ * The same functionality can be achieved with `tensor`, but in general
+ * we recommend using `scalar` as it makes the code more readable.
*
* ```js
- * dl.scalar(3.14).print();
+ * tf.scalar(3.14).print();
* ```
*
* @param value The value of the scalar.
@@ -104,12 +103,11 @@ export class ArrayOps {
/**
* Creates rank-1 `Tensor` with the provided values, shape and dtype.
*
- * This method is mainly for self documentation and TypeScript typings as the
- * same functionality can be achieved with `tensor`. In general, we recommend
- * using this method as it makes code more readable.
+ * The same functionality can be achieved with `tensor`, but in general
+ * we recommend using `tensor1d` as it makes the code more readable.
*
* ```js
- * dl.tensor1d([1, 2, 3]).print();
+ * tf.tensor1d([1, 2, 3]).print();
* ```
*
* @param values The values of the tensor. Can be array of numbers,
@@ -129,17 +127,16 @@ export class ArrayOps {
/**
* Creates rank-2 `Tensor` with the provided values, shape and dtype.
*
- * This method is mainly for self documentation and TypeScript typings as the
- * same functionality can be achieved with `tensor`. In general, we recommend
- * using this method as it makes code more readable.
+ * The same functionality can be achieved with `tensor`, but in general
+ * we recommend using `tensor2d` as it makes the code more readable.
*
* ```js
* // Pass a nested array.
- * dl.tensor2d([[1, 2], [3, 4]]).print();
+ * tf.tensor2d([[1, 2], [3, 4]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
- * dl.tensor2d([1, 2, 3, 4], [2, 2]).print();
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
@@ -165,17 +162,16 @@ export class ArrayOps {
/**
* Creates rank-3 `Tensor` with the provided values, shape and dtype.
*
- * This method is mainly for self documentation and TypeScript typings as
- * the same functionality can be achieved with `tensor`. In general, we
- * recommend using this method as it makes code more readable.
+ * The same functionality can be achieved with `tensor`, but in general
+ * we recommend using `tensor3d` as it makes the code more readable.
*
* ```js
* // Pass a nested array.
- * dl.tensor3d([[[1], [2]], [[3], [4]]]).print();
+ * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
- * dl.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
+ * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
@@ -200,13 +196,17 @@ export class ArrayOps {
/**
* Creates rank-4 `Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tensor`, but in general
+ * we recommend using `tensor4d` as it makes the code more readable.
+ *
* ```js
* // Pass a nested array.
- * dl.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
+ * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
- * dl.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
+ * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
@@ -233,7 +233,7 @@ export class ArrayOps {
* Creates a `Tensor` with all elements set to 1.
*
* ```js
- * dl.ones([2, 2]).print();
+ * tf.ones([2, 2]).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
@@ -252,7 +252,7 @@ export class ArrayOps {
* Creates a `Tensor` with all elements set to 0.
*
* ```js
- * dl.zeros([2, 2]).print();
+ * tf.zeros([2, 2]).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
@@ -271,7 +271,7 @@ export class ArrayOps {
* Creates a `Tensor` filled with a scalar value.
*
* ```js
- * dl.fill([2, 2], 4).print();
+ * tf.fill([2, 2], 4).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
@@ -295,8 +295,8 @@ export class ArrayOps {
* given tensor.
*
* ```js
- * const x = dl.tensor([1, 2]);
- * dl.onesLike(x).print();
+ * const x = tf.tensor([1, 2]);
+ * tf.onesLike(x).print();
* ```
* @param x A tensor.
*/
@@ -311,8 +311,8 @@ export class ArrayOps {
* given tensor.
*
* ```js
- * const x = dl.tensor([1, 2]);
- * dl.zerosLike(x).print();
+ * const x = tf.tensor([1, 2]);
+ * tf.zerosLike(x).print();
* ```
*
* @param x The tensor of required shape.
@@ -328,7 +328,7 @@ export class ArrayOps {
* tensor.
*
* ```js
- * const x = dl.tensor([1, 2]);
+ * const x = tf.tensor([1, 2]);
* x.clone().print();
* ```
*
@@ -344,7 +344,7 @@ export class ArrayOps {
* Creates a `Tensor` with values sampled from a normal distribution.
*
* ```js
- * dl.randomNormal([2, 2]).print();
+ * tf.randomNormal([2, 2]).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
@@ -375,7 +375,7 @@ export class ArrayOps {
* distribution.
*
* ```js
- * dl.truncatedNormal([2, 2]).print();
+ * tf.truncatedNormal([2, 2]).print();
* ```
*
* The generated values follow a normal distribution with specified mean and
@@ -413,7 +413,7 @@ export class ArrayOps {
* bound maxval is excluded.
*
* ```js
- * dl.randomUniform([2, 2]).print();
+ * tf.randomUniform([2, 2]).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
@@ -518,7 +518,7 @@ export class ArrayOps {
* `offValue` (defaults to 0).
*
* ```js
- * dl.oneHot(dl.tensor1d([0, 1]), 3).print();
+ * tf.oneHot(tf.tensor1d([0, 1]), 3).print();
* ```
*
* @param indices 1D Array of indices.
@@ -550,7 +550,7 @@ export class ArrayOps {
* image.data[2] = 200;
* image.data[3] = 255;
*
- * dl.fromPixels(image).print();
+ * tf.fromPixels(image).print();
* ```
*
* @param pixels The input image to construct the tensor from.
@@ -587,7 +587,7 @@ export class ArrayOps {
* tensor.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
* x.reshape([2, 2]).print();
* ```
*
@@ -613,7 +613,7 @@ export class ArrayOps {
* Removes dimensions of size 1 from the shape of a `Tensor`.
*
* ```js
- * const x = dl.tensor([1, 2, 3, 4], [1, 1, 4]);
+ * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
* x.squeeze().print();
* ```
*
@@ -631,8 +631,8 @@ export class ArrayOps {
* Casts a `Tensor` to a new dtype.
*
* ```js
- * const x = dl.tensor1d([1.5, 2.5, 3]);
- * dl.cast(x, 'int32').print();
+ * const x = tf.tensor1d([1.5, 2.5, 3]);
+ * tf.cast(x, 'int32').print();
* ```
* @param x The input tensor to be casted.
* @param dtype The dtype to cast the input tensor to.
@@ -657,13 +657,13 @@ export class ArrayOps {
* `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
*
* ```js
- * const a = dl.tensor1d([1, 2]);
+ * const a = tf.tensor1d([1, 2]);
*
* a.tile([2]).print(); // or a.tile([2])
* ```
*
* ```js
- * const a = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* a.tile([1, 2]).print(); // or a.tile([1, 2])
* ```
@@ -734,15 +734,15 @@ export class ArrayOps {
* Gather slices from tensor `x`'s axis `axis` according to `indices`.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
- * const indices = dl.tensor1d([1, 3, 3]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const indices = tf.tensor1d([1, 3, 3]);
*
* x.gather(indices).print();
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
- * const indices = dl.tensor1d([1, 1, 0]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const indices = tf.tensor1d([1, 1, 0]);
*
* x.gather(indices).print();
* ```
@@ -818,11 +818,10 @@ export class ArrayOps {
/**
* Pads a `Tensor` with a given value and paddings.
*
- * This operation currently only implements the `CONSTANT` mode from
- * Tensorflow's `pad` operation.
+ * This operation currently only implements the `CONSTANT` mode.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
* x.pad([[1, 2]]).print();
* ```
* @param x The tensor to pad.
@@ -853,10 +852,10 @@ export class ArrayOps {
* Stacks a list of rank-`R` `Tensor`s into one rank-`(R+1)` `Tensor`.
*
* ```js
- * const a = dl.tensor1d([1, 2]);
- * const b = dl.tensor1d([3, 4]);
- * const c = dl.tensor1d([5, 6]);
- * dl.stack([a, b, c]).print();
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.stack([a, b, c]).print();
* ```
*
* @param tensors A list of tensor objects with the same shape and dtype.
@@ -892,7 +891,7 @@ export class ArrayOps {
* into the tensor's shape.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
* const axis = 1;
* x.expandDims(axis).print();
* ```
@@ -914,7 +913,7 @@ export class ArrayOps {
* Return an evenly spaced sequence of numbers over the given interval.
*
* ```js
- * dl.linspace(0, 9, 10).print();
+ * tf.linspace(0, 9, 10).print();
* ```
* @param start The start value of the sequence.
* @param stop The end value of the sequence.
@@ -948,7 +947,7 @@ export class ArrayOps {
* supported.
*
* ```js
- * dl.range(0, 9, 2).print();
+ * tf.range(0, 9, 2).print();
* ```
*
* @param start An integer start value
@@ -1003,7 +1002,7 @@ export class ArrayOps {
*
* ```js
* // Create a buffer and set values at particular indices.
- * const buffer = dl.buffer([2, 2]);
+ * const buffer = tf.buffer([2, 2]);
* buffer.set(3, 0, 0);
* buffer.set(5, 1, 0);
*
@@ -1027,7 +1026,7 @@ export class ArrayOps {
*
* ```js
* const verbose = true;
- * dl.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
* ```
* @param x The tensor to be printed.
* @param verbose Whether to print verbose information about the ` Tensor`,
diff --git a/src/ops/binary_ops.ts b/src/ops/binary_ops.ts
index 2a86523fc0..672f8aec32 100644
--- a/src/ops/binary_ops.ts
+++ b/src/ops/binary_ops.ts
@@ -32,18 +32,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([1, 2, 3, 4]);
- * const b = dl.tensor1d([10, 20, 30, 40]);
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
*
- * a.add(b).print(); // or dl.add(a, b)
+ * a.add(b).print(); // or tf.add(a, b)
* ```
*
* ```js
* // Broadcast add a with b.
- * const a = dl.scalar(5);
- * const b = dl.tensor1d([10, 20, 30, 40]);
+ * const a = tf.scalar(5);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
*
- * a.add(b).print(); // or dl.add(a, b)
+ * a.add(b).print(); // or tf.add(a, b)
* ```
* @param a The first `Tensor` to add.
* @param b The second `Tensor` to add. Must have the same type as `a`.
@@ -98,18 +98,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([10, 20, 30, 40]);
- * const b = dl.tensor1d([1, 2, 3, 4]);
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
*
- * a.sub(b).print(); // or dl.sub(a, b)
+ * a.sub(b).print(); // or tf.sub(a, b)
* ```
*
* ```js
* // Broadcast subtract a with b.
- * const a = dl.tensor1d([10, 20, 30, 40]);
- * const b = dl.scalar(5);
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.scalar(5);
*
- * a.sub(b).print(); // or dl.sub(a, b)
+ * a.sub(b).print(); // or tf.sub(a, b)
* ```
* @param a The first `Tensor` to subtract from.
* @param b The second `Tensor` to be subtracted. Must have the same dtype as
@@ -167,17 +167,17 @@ export class BinaryOps {
* corresponding elements in x and y.
*
* ```js
- * const a = dl.tensor([[2, 3], [4, 5]])
- * const b = dl.tensor([[1, 2], [3, 0]]).toInt();
 + * const a = tf.tensor([[2, 3], [4, 5]]);
+ * const b = tf.tensor([[1, 2], [3, 0]]).toInt();
*
- * a.pow(b).print(); // or dl.pow(a, b)
+ * a.pow(b).print(); // or tf.pow(a, b)
* ```
*
* ```js
- * const a = dl.tensor([[1, 2], [3, 4]])
- * const b = dl.tensor(2).toInt();
 + * const a = tf.tensor([[1, 2], [3, 4]]);
+ * const b = tf.tensor(2).toInt();
*
- * a.pow(b).print(); // or dl.pow(a, b)
+ * a.pow(b).print(); // or tf.pow(a, b)
* ```
* We also expose `powStrict` which has the same signature as this op and
* asserts that `base` and `exp` are the same shape (does not broadcast).
@@ -198,8 +198,8 @@ export class BinaryOps {
}
const derBase = () => {
const expFloat = exp.toFloat();
- const dx = expFloat.mul(
- base.toFloat().pow(expFloat.sub(scalar(1)))) as T;
+ const dx =
+ expFloat.mul(base.toFloat().pow(expFloat.sub(scalar(1)))) as T;
return dy.mulStrict(dx) as T;
};
return {base: derBase};
@@ -230,18 +230,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([1, 2, 3, 4]);
- * const b = dl.tensor1d([2, 3, 4, 5]);
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([2, 3, 4, 5]);
*
- * a.mul(b).print(); // or dl.mul(a, b)
+ * a.mul(b).print(); // or tf.mul(a, b)
* ```
*
* ```js
* // Broadcast mul a with b.
- * const a = dl.tensor1d([1, 2, 3, 4]);
- * const b = dl.scalar(5);
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.scalar(5);
*
- * a.mul(b).print(); // or dl.mul(a, b)
+ * a.mul(b).print(); // or tf.mul(a, b)
* ```
* @param a The first tensor to multiply.
* @param b The second tensor to multiply. Must have the same dtype as `a`.
@@ -298,18 +298,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([1, 4, 9, 16]);
- * const b = dl.tensor1d([1, 2, 3, 4]);
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
*
- * a.div(b).print(); // or dl.div(a, b)
+ * a.div(b).print(); // or tf.div(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
- * const a = dl.tensor1d([2, 4, 6, 8]);
- * const b = dl.scalar(2);
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
*
- * a.div(b).print(); // or dl.div(a, b)
+ * a.div(b).print(); // or tf.div(a, b)
* ```
*
* @param a The first tensor as the numerator.
@@ -366,18 +366,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([1, 4, 3, 16]);
- * const b = dl.tensor1d([1, 2, 9, 4]);
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
*
- * a.minimum(b).print(); // or dl.minimum(a, b)
+ * a.minimum(b).print(); // or tf.minimum(a, b)
* ```
*
* ```js
* // Broadcast minimum a with b.
- * const a = dl.tensor1d([2, 4, 6, 8]);
- * const b = dl.scalar(5);
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
*
- * a.minimum(b).print(); // or dl.minimum(a, b)
+ * a.minimum(b).print(); // or tf.minimum(a, b)
* ```
*
* @param a The first tensor.
@@ -418,18 +418,18 @@ export class BinaryOps {
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
- * const a = dl.tensor1d([1, 4, 3, 16]);
- * const b = dl.tensor1d([1, 2, 9, 4]);
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
*
- * a.maximum(b).print(); // or dl.maximum(a, b)
+ * a.maximum(b).print(); // or tf.maximum(a, b)
* ```
*
* ```js
* // Broadcast maximum a with b.
- * const a = dl.tensor1d([2, 4, 6, 8]);
- * const b = dl.scalar(5);
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
*
- * a.maximum(b).print(); // or dl.maximum(a, b)
+ * a.maximum(b).print(); // or tf.maximum(a, b)
* ```
*
* @param a The first tensor.
diff --git a/src/ops/concat.ts b/src/ops/concat.ts
index e5e805808a..e5af04babc 100644
--- a/src/ops/concat.ts
+++ b/src/ops/concat.ts
@@ -122,23 +122,23 @@ export class ConcatOps {
* dimensions except `axis`.
*
* ```js
- * const a = dl.tensor1d([1, 2]);
- * const b = dl.tensor1d([3, 4]);
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
* a.concat(b).print(); // or a.concat(b)
* ```
*
* ```js
- * const a = dl.tensor1d([1, 2]);
- * const b = dl.tensor1d([3, 4]);
- * const c = dl.tensor1d([5, 6]);
- * dl.concat([a, b, c]).print();
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.concat([a, b, c]).print();
* ```
*
* ```js
- * const a = dl.tensor2d([[1, 2], [10, 20]]);
- * const b = dl.tensor2d([[3, 4], [30, 40]]);
+ * const a = tf.tensor2d([[1, 2], [10, 20]]);
+ * const b = tf.tensor2d([[3, 4], [30, 40]]);
* const axis = 1;
- * dl.concat([a, b], axis).print();
+ * tf.concat([a, b], axis).print();
* ```
* @param tensors A list of tensors to concatenate.
* @param axis The axis to concate along. Defaults to 0 (the first dim).
diff --git a/src/ops/matmul.ts b/src/ops/matmul.ts
index 92f2704ebc..b41cf56574 100644
--- a/src/ops/matmul.ts
+++ b/src/ops/matmul.ts
@@ -26,10 +26,10 @@ export class MatmulOps {
* Computes the dot product of two matrices, A * B. These must be matrices.
*
* ```js
- * const a = dl.tensor2d([1, 2], [1, 2]);
- * const b = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const a = tf.tensor2d([1, 2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
- * a.matMul(b).print(); // or dl.matMul(a, b)
+ * a.matMul(b).print(); // or tf.matMul(a, b)
* ```
* @param a First matrix in dot product operation.
* @param b Second matrix in dot product operation.
@@ -140,10 +140,10 @@ export class MatmulOps {
* Computes the outer product of two vectors, v1 and v2.
*
* ```js
- * const a = dl.tensor1d([1, 2, 3]);
- * const b = dl.tensor1d([3, 4, 5]);
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([3, 4, 5]);
*
- * dl.outerProduct(a, b).print();
+ * tf.outerProduct(a, b).print();
* ```
* @param v1 The first vector in the outer product operation.
* @param v2 The second vector in the dot product operation.
diff --git a/src/ops/norm.ts b/src/ops/norm.ts
index 50685266b1..557f03e9f3 100644
--- a/src/ops/norm.ts
+++ b/src/ops/norm.ts
@@ -30,9 +30,9 @@ export class NormOps {
* and matrix norms (Frobenius, 1-norm, and inf-norm).
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
*
- * x.norm().print(); // or dl.norm(x)
+ * x.norm().print(); // or tf.norm(x)
* ```
*
* @param x The input array.
diff --git a/src/ops/reduction_ops.ts b/src/ops/reduction_ops.ts
index 561b36061f..cd4a5a3b9d 100644
--- a/src/ops/reduction_ops.ts
+++ b/src/ops/reduction_ops.ts
@@ -35,16 +35,16 @@ export class ReductionOps {
* single element is returned.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.logSumExp().print(); // or dl.logSumExp(x)
+ * x.logSumExp().print(); // or tf.logSumExp(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
- * x.logSumExp(axis).print(); // or dl.logSumExp(a, axis)
 + * x.logSumExp(axis).print(); // or tf.logSumExp(x, axis)
* ```
* @param input The input tensor.
* @param axis The dimension(s) to reduce. If null (the default),
@@ -81,16 +81,16 @@ export class ReductionOps {
* single element is returned.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.sum().print(); // or dl.logSumExp(x)
 + * x.sum().print(); // or tf.sum(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
- * x.sum(axis).print(); // or dl.sum(x, axis)
+ * x.sum(axis).print(); // or tf.sum(x, axis)
* ```
*
* @param x The input tensor to compute the sum over.
@@ -147,16 +147,16 @@ export class ReductionOps {
* a single element is returned.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.mean().print(); // or dl.logSumExp(a)
 + * x.mean().print(); // or tf.mean(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
- * x.mean(axis).print(); // or dl.mean(x, axis)
+ * x.mean(axis).print(); // or tf.mean(x, axis)
* ```
*
* @param x The input tensor.
@@ -206,16 +206,16 @@ export class ReductionOps {
* single element is returned.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.min().print(); // or dl.min(x)
+ * x.min().print(); // or tf.min(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
- * x.min(axis).print(); // or dl.min(x, axis)
+ * x.min(axis).print(); // or tf.min(x, axis)
* ```
*
* @param x The input Tensor.
@@ -252,16 +252,16 @@ export class ReductionOps {
* a single element is returned.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.max().print(); // or dl.max(x)
+ * x.max().print(); // or tf.max(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
- * x.max(axis).print(); // or dl.max(x, axis)
+ * x.max(axis).print(); // or tf.max(x, axis)
* ```
*
* @param x The input tensor.
@@ -295,16 +295,16 @@ export class ReductionOps {
* removed.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.argMin().print(); // or dl.argMin(x)
+ * x.argMin().print(); // or tf.argMin(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 4, 3], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
*
* const axis = 1;
- * x.argMin(axis).print(); // or dl.argMin(x, axis)
+ * x.argMin(axis).print(); // or tf.argMin(x, axis)
* ```
*
* @param x The input tensor.
@@ -331,16 +331,16 @@ export class ReductionOps {
* removed.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3]);
+ * const x = tf.tensor1d([1, 2, 3]);
*
- * x.argMax().print(); // or dl.argMax(x)
+ * x.argMax().print(); // or tf.argMax(x)
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 4, 3], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
*
* const axis = 1;
- * x.argMax(axis).print(); // or dl.argMax(x, axis)
+ * x.argMax(axis).print(); // or tf.argMax(x, axis)
* ```
*
* @param x The input tensor.
diff --git a/src/ops/reverse.ts b/src/ops/reverse.ts
index 20bd76b4c0..ee413813da 100644
--- a/src/ops/reverse.ts
+++ b/src/ops/reverse.ts
@@ -75,13 +75,13 @@ export class ReverseOps {
* Reverses a `Tensor` along a specified axis.
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
*
* x.reverse().print();
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.reverse(axis).print();
diff --git a/src/ops/slice.ts b/src/ops/slice.ts
index 4437a0d4a1..7e942000b4 100644
--- a/src/ops/slice.ts
+++ b/src/ops/slice.ts
@@ -79,19 +79,19 @@ export class SliceOps {
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `x` is of the given rank:
- * - `dl.slice1d`
- * - `dl.slice2d`
- * - `dl.slice3d`
- * - `dl.slice4d`
+ * - `tf.slice1d`
+ * - `tf.slice2d`
+ * - `tf.slice3d`
+ * - `tf.slice4d`
*
* ```js
- * const x = dl.tensor1d([1, 2, 3, 4]);
+ * const x = tf.tensor1d([1, 2, 3, 4]);
*
* x.slice([1], [2]).print();
* ```
*
* ```js
- * const x = dl.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* x.slice([1, 0], [1, 2]).print();
* ```
diff --git a/src/ops/softmax.ts b/src/ops/softmax.ts
index ea1cc84cd8..cd4c0b977a 100644
--- a/src/ops/softmax.ts
+++ b/src/ops/softmax.ts
@@ -29,15 +29,15 @@ export class SoftmaxOps {
* Computes the softmax normalized vector given the logits.
*
* ```js
- * const a = dl.tensor1d([1, 2, 3]);
+ * const a = tf.tensor1d([1, 2, 3]);
*
- * a.softmax().print(); // or dl.softmax(a)
+ * a.softmax().print(); // or tf.softmax(a)
* ```
*
* ```js
- * const a = dl.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
+ * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
*
- * a.softmax().print(); // or dl.softmax(a)
+ * a.softmax().print(); // or tf.softmax(a)
* ```
*
* @param logits The logits array.
diff --git a/src/ops/transpose.ts b/src/ops/transpose.ts
index a725174075..d2a18f99ad 100644
--- a/src/ops/transpose.ts
+++ b/src/ops/transpose.ts
@@ -32,9 +32,9 @@ export class TransposeOps {
* operation performs a regular matrix transpose on 2-D input `Tensor`s.
*
* ```js
- * const a = dl.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
+ * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
*
- * a.transpose().print(); // or dl.transpose(a)
+ * a.transpose().print(); // or tf.transpose(a)
* ```
*
* @param x The tensor to transpose.
diff --git a/src/ops/unary_ops.ts b/src/ops/unary_ops.ts
index 01ef0d370e..aed5d3efb2 100644
--- a/src/ops/unary_ops.ts
+++ b/src/ops/unary_ops.ts
@@ -29,9 +29,9 @@ export class UnaryOps {
* Computes `-1 * x` element-wise.
*
* ```js
- * const x = dl.tensor2d([1, 2, -2, 0], [2, 2]);
+ * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
*
- * x.neg().print(); // or dl.neg(x)
+ * x.neg().print(); // or tf.neg(x)
* ```
*
* @param x The input tensor.
@@ -49,9 +49,9 @@ export class UnaryOps {
* Computes ceiling of input `Tensor` element-wise: `ceil(x)`
*
* ```js
- * const x = dl.tensor1d([.6, 1.1, -3.3]);
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
*
- * x.ceil().print(); // or dl.ceil(x)
+ * x.ceil().print(); // or tf.ceil(x)
* ```
* @param x The input Tensor.
*/
@@ -69,9 +69,9 @@ export class UnaryOps {
* Computes floor of input `Tensor` element-wise: `floor(x)`.
*
* ```js
- * const x = dl.tensor1d([.6, 1.1, -3.3]);
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
*
- * x.floor().print(); // or dl.floor(x)
+ * x.floor().print(); // or tf.floor(x)
* ```
* @param x The input tensor.
*/
@@ -90,9 +90,9 @@ export class UnaryOps {
* Computes exponential of the input `Tensor` element-wise. `e ^ x`
*
* ```js
- * const x = dl.tensor1d([1, 2, -3]);
+ * const x = tf.tensor1d([1, 2, -3]);
*
- * x.exp().print(); // or dl.exp(x)
+ * x.exp().print(); // or tf.exp(x)
* ```
* @param x The input tensor.
*/
@@ -111,9 +111,9 @@ export class UnaryOps {
* Computes natural logarithm of the input `Tensor` element-wise: `ln(x)`
*
* ```js
- * const x = dl.tensor1d([1, 2, Math.E]);
+ * const x = tf.tensor1d([1, 2, Math.E]);
*
- * x.log().print(); // or dl.log(x)
+ * x.log().print(); // or tf.log(x)
* ```
* @param x The input tensor.
*/
@@ -131,9 +131,9 @@ export class UnaryOps {
* element-wise: `ln(1 + x)`
*
* ```js
- * const x = dl.tensor1d([1, 2, Math.E - 1]);
+ * const x = tf.tensor1d([1, 2, Math.E - 1]);
*
- * x.log1p().print(); // or dl.log1p(x)
+ * x.log1p().print(); // or tf.log1p(x)
* ```
* @param x The input tensor.
*/
@@ -150,9 +150,9 @@ export class UnaryOps {
* Computes square root of the input `Tensor` element-wise: `y = sqrt(x)`
*
* ```js
- * const x = dl.tensor1d([1, 2, 4, -1]);
+ * const x = tf.tensor1d([1, 2, 4, -1]);
*
- * x.sqrt().print(); // or dl.sqrt(x)
+ * x.sqrt().print(); // or tf.sqrt(x)
* ```
* @param x The input tensor.
*/
@@ -169,9 +169,9 @@ export class UnaryOps {
* Computes square of `x` element-wise: `x ^ 2`
*
* ```js
- * const x = dl.tensor1d([1, 2, Math.sqrt(2), -1]);
+ * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
*
- * x.square().print(); // or dl.square(x)
+ * x.square().print(); // or tf.square(x)
* ```
* @param x The input Tensor.
*/
@@ -188,9 +188,9 @@ export class UnaryOps {
* Computes absolute value element-wise: `abs(x)`
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
*
- * x.abs().print(); // or dl.abs(x)
+ * x.abs().print(); // or tf.abs(x)
* ```
* @param x The input `Tensor`.
*/
@@ -207,9 +207,9 @@ export class UnaryOps {
* Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
*
- * x.clipByValue(-2, 3).print(); // or dl.clipByValue(x, -2, 3)
+ * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
* ```
* @param x The input tensor.
* @param clipValueMin Lower-bound of range to be clipped to.
@@ -241,9 +241,9 @@ export class UnaryOps {
* Computes rectified linear element-wise: `max(x, 0)`
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
*
- * x.relu().print(); // or dl.relu(x)
+ * x.relu().print(); // or tf.relu(x)
* ```
* @param x The input tensor.
*/
@@ -261,9 +261,9 @@ export class UnaryOps {
* Computes exponential linear element-wise, `x > 0 ? e ^ x - 1 : 0`
*
* ```js
- * const x = dl.tensor1d([-1, 1, -3, 2]);
+ * const x = tf.tensor1d([-1, 1, -3, 2]);
*
- * x.elu().print(); // or dl.elu(x)
+ * x.elu().print(); // or tf.elu(x)
* ```
* @param x The input tensor.
*/
@@ -282,9 +282,9 @@ export class UnaryOps {
* `x < 0 ? scale * alpha * (exp(x) - 1) : x`
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
*
- * x.selu().print(); // or dl.selu(x)
+ * x.selu().print(); // or tf.selu(x)
* ```
* @param x The input tensor.
*/
@@ -317,9 +317,9 @@ export class UnaryOps {
* http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
*
- * x.leakyRelu(0.1).print(); // or dl.leakyRelu(x, 0.1)
+ * x.leakyRelu(0.1).print(); // or tf.leakyRelu(x, 0.1)
* ```
* @param x The input tensor.
* @param alpha The scaling factor for negative values, defaults to 0.2.
@@ -340,10 +340,10 @@ export class UnaryOps {
* `x < 0 ? alpha * x : f(x) = x`
*
* ```js
- * const x = dl.tensor1d([-1, 2, -3, 4]);
- * const alpha = dl.scalar(0.1);
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ * const alpha = tf.scalar(0.1);
*
- * x.prelu(alpha).print(); // or dl.prelu(x, alpha)
+ * x.prelu(alpha).print(); // or tf.prelu(x, alpha)
* ```
* @param x The input tensor.
* @param alpha Scaling factor for negative values.
@@ -361,9 +361,9 @@ export class UnaryOps {
* Computes sigmoid element-wise, `1 / (1 + exp(-x))`
*
* ```js
- * const x = dl.tensor1d([0, -1, 2, -3]);
+ * const x = tf.tensor1d([0, -1, 2, -3]);
*
- * x.sigmoid().print(); // or dl.sigmoid(x)
+ * x.sigmoid().print(); // or tf.sigmoid(x)
* ```
* @param x The input tensor.
*/
@@ -382,9 +382,9 @@ export class UnaryOps {
* Computes sin of the input Tensor element-wise: `sin(x)`
*
* ```js
- * const x = dl.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
- * x.sin().print(); // or dl.sin(x)
+ * x.sin().print(); // or tf.sin(x)
* ```
* @param x The input tensor.
*/
@@ -401,9 +401,9 @@ export class UnaryOps {
* Computes cos of the input `Tensor` element-wise: `cos(x)`
*
* ```js
- * const x = dl.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
- * x.cos().print(); // or dl.cos(x)
+ * x.cos().print(); // or tf.cos(x)
* ```
* @param x The input tensor.
*/
@@ -420,9 +420,9 @@ export class UnaryOps {
* Computes tan of the input `Tensor` element-wise, `tan(x)`
*
* ```js
- * const x = dl.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
*
- * x.tan().print(); // or dl.tan(x)
+ * x.tan().print(); // or tf.tan(x)
* ```
* @param x The input tensor.
*/
@@ -439,9 +439,9 @@ export class UnaryOps {
* Computes asin of the input `Tensor` element-wise: `asin(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, .7]);
+ * const x = tf.tensor1d([0, 1, -1, .7]);
*
- * x.asin().print(); // or dl.asin(x)
+ * x.asin().print(); // or tf.asin(x)
* ```
* @param x The input tensor.
*/
@@ -461,9 +461,9 @@ export class UnaryOps {
* Computes acos of the input `Tensor` element-wise: `acos(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, .7]);
+ * const x = tf.tensor1d([0, 1, -1, .7]);
*
- * x.acos().print(); // or dl.acos(x)
+ * x.acos().print(); // or tf.acos(x)
* ```
* @param x The input tensor.
*/
@@ -484,9 +484,9 @@ export class UnaryOps {
* Computes atan of the input `Tensor` element-wise: `atan(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, .7]);
+ * const x = tf.tensor1d([0, 1, -1, .7]);
*
- * x.atan().print(); // or dl.atan(x)
+ * x.atan().print(); // or tf.atan(x)
* ```
* @param x The input tensor.
*/
@@ -503,9 +503,9 @@ export class UnaryOps {
* Computes hyperbolic sin of the input `Tensor` element-wise: `sinh(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, .7]);
+ * const x = tf.tensor1d([0, 1, -1, .7]);
*
- * x.sinh().print(); // or dl.sinh(x)
+ * x.sinh().print(); // or tf.sinh(x)
* ```
* @param x The input tensor.
*/
@@ -522,9 +522,9 @@ export class UnaryOps {
* Computes hyperbolic cos of the input `Tensor` element-wise: `cosh(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, .7]);
+ * const x = tf.tensor1d([0, 1, -1, .7]);
*
- * x.cosh().print(); // or dl.cosh(x)
+ * x.cosh().print(); // or tf.cosh(x)
* ```
* @param x The input tensor.
*/
@@ -541,9 +541,9 @@ export class UnaryOps {
* Computes hyperbolic tangent of the input `Tensor` element-wise: `tanh(x)`
*
* ```js
- * const x = dl.tensor1d([0, 1, -1, 70]);
+ * const x = tf.tensor1d([0, 1, -1, 70]);
*
- * x.tanh().print(); // or dl.tanh(x)
+ * x.tanh().print(); // or tf.tanh(x)
* ```
* @param x The input tensor.
*/
@@ -562,9 +562,9 @@ export class UnaryOps {
* Computes step of the input `Tensor` element-wise: `x > 0 ? 1 : alpha * x`
*
* ```js
- * const x = dl.tensor1d([0, 2, -1, -3]);
+ * const x = tf.tensor1d([0, 2, -1, -3]);
*
- * x.step(.5).print(); // or dl.step(x, .5)
+ * x.step(.5).print(); // or tf.step(x, .5)
* ```
* @param x The input tensor.
* @param alpha The gradient when input is negative.
diff --git a/src/optimizers/optimizer_constructors.ts b/src/optimizers/optimizer_constructors.ts
index a77de1068a..9118f11ea1 100644
--- a/src/optimizers/optimizer_constructors.ts
+++ b/src/optimizers/optimizer_constructors.ts
@@ -31,19 +31,19 @@ export class OptimizerConstructors {
*
* ```js
* // Fit a quadratic function by learning the coefficients a, b, c.
- * const xs = dl.tensor1d([0, 1, 2, 3]);
- * const ys = dl.tensor1d([1.1, 5.9, 16.8, 33.9]);
+ * const xs = tf.tensor1d([0, 1, 2, 3]);
+ * const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]);
*
- * const a = dl.scalar(Math.random()).variable();
- * const b = dl.scalar(Math.random()).variable();
- * const c = dl.scalar(Math.random()).variable();
+ * const a = tf.scalar(Math.random()).variable();
+ * const b = tf.scalar(Math.random()).variable();
+ * const c = tf.scalar(Math.random()).variable();
*
* // y = a * x^2 + b * x + c.
* const f = x => a.mul(x.square()).add(b.mul(x)).add(c);
* const loss = (pred, label) => pred.sub(label).square().mean();
*
* const learningRate = 0.01;
- * const optimizer = dl.train.sgd(learningRate);
+ * const optimizer = tf.train.sgd(learningRate);
*
* // Train the model.
* for (let i = 0; i < 10; i++) {
diff --git a/src/tensor.ts b/src/tensor.ts
index 08e54444a7..c445893c45 100644
--- a/src/tensor.ts
+++ b/src/tensor.ts
@@ -885,8 +885,8 @@ export class Variable extends Tensor {
/**
* Creates a new variable with the provided initial value.
* ```js
- * const x = dl.variable(dl.tensor([1, 2, 3]));
- * x.assign(dl.tensor([4, 5, 6]));
+ * const x = tf.variable(tf.tensor([1, 2, 3]));
+ * x.assign(tf.tensor([4, 5, 6]));
*
* x.print();
* ```
diff --git a/src/tracking.ts b/src/tracking.ts
index b53c912438..639b919b13 100644
--- a/src/tracking.ts
+++ b/src/tracking.ts
@@ -39,20 +39,20 @@ export class Tracking {
*
* ```js
* // y = 2 ^ 2 + 1
- * const y = dl.tidy(() => {
+ * const y = tf.tidy(() => {
* // a, b, and one will be cleaned up when the tidy ends.
- * const one = dl.scalar(1);
- * const a = dl.scalar(2);
+ * const one = tf.scalar(1);
+ * const a = tf.scalar(2);
* const b = a.square();
*
- * console.log('numTensors (in tidy): ' + dl.memory().numTensors);
+ * console.log('numTensors (in tidy): ' + tf.memory().numTensors);
*
* // The value returned inside the tidy function will return
* // through the tidy, in this case to the variable y.
* return b.add(one);
* });
*
- * console.log('numTensors (outside tidy): ' + dl.memory().numTensors);
+ * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
* y.print();
* ```
*
@@ -70,7 +70,7 @@ export class Tracking {
if (fn == null) {
// Called with only 1 argument.
if (typeof nameOrFn !== 'function') {
- throw new Error('Please provide a function to dl.tidy()');
+ throw new Error('Please provide a function to tidy()');
}
fn = nameOrFn;
} else {
@@ -78,12 +78,12 @@ export class Tracking {
if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {
throw new Error(
'When calling with two arguments, the first argument ' +
- 'to dl.tidy() must be a string');
+ 'to tidy() must be a string');
}
if (typeof fn !== 'function') {
throw new Error(
'When calling with two arguments, the 2nd argument ' +
- 'to dl.tidy() must be a function');
+ 'to tidy() must be a function');
}
name = nameOrFn as string;
// TODO(nsthorat,smilkov): Do operation logging and performance
@@ -119,22 +119,22 @@ export class Tracking {
*
* ```js
* let b;
- * const y = dl.tidy(() => {
- * const one = dl.scalar(1);
- * const a = dl.scalar(2);
+ * const y = tf.tidy(() => {
+ * const one = tf.scalar(1);
+ * const a = tf.scalar(2);
*
* // b will not be cleaned up by the tidy. a and one will be cleaned up
* // when the tidy ends.
- * b = dl.keep(a.square());
+ * b = tf.keep(a.square());
*
- * console.log('numTensors (in tidy): ' + dl.memory().numTensors);
+ * console.log('numTensors (in tidy): ' + tf.memory().numTensors);
*
* // The value returned inside the tidy function will return
* // through the tidy, in this case to the variable y.
* return b.add(one);
* });
*
- * console.log('numTensors (outside tidy): ' + dl.memory().numTensors);
+ * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
* console.log('y:');
* y.print();
* console.log('b:');
@@ -161,8 +161,8 @@ export class Tracking {
* - `downloadWaitMs`: CPU blocking time on texture downloads (readPixels).
*
* ```js
- * const x = dl.randomNormal([20, 20]);
- * const time = await dl.time(() => x.matMul(x));
+ * const x = tf.randomNormal([20, 20]);
+ * const time = await tf.time(() => x.matMul(x));
*
* console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`);
* ```