diff --git a/index.html b/index.html
index 54b5fdf07..4a29472e8 100644
--- a/index.html
+++ b/index.html
@@ -204,6 +204,7 @@
+
+<div id="container">
+    <h1><a href="//webrtc.github.io/samples/" title="WebRTC samples homepage">WebRTC samples</a>
+        <span>Breakout Box video processing in worker</span>
+    </h1>
+
+    <p>This sample shows how to perform processing on a video stream using the experimental
+        <a href="https://w3c.github.io/mediacapture-transform/">mediacapture-transform</a> API
+        in a Worker.
+    </p>
+
+    <video id="localVideo" playsinline autoplay muted></video>
+    <video id="croppedVideo" playsinline autoplay muted></video>
+
+    <div>
+        <label for="transformSelector">Transform:</label>
+        <select id="transformSelector">
+            <option value="webgl-background-blur">WebGL background blur</option>
+            <option value="webgpu-background-blur">WebGPU/WebNN background blur</option>
+            <option value="webgl">WebGL wrap</option>
+            <option value="canvas2d">Canvas2D</option>
+            <option value="noop">Do nothing</option>
+            <option value="drop">Drop frames at random</option>
+            <option value="delay">Delay all frames by 100ms</option>
+            <option value="webcodec">Run frames through WebCodec</option>
+        </select>
+    </div>
+
+    <div>
+        <button id="startButton">Start</button>
+        <button id="stopButton" disabled>Stop</button>
+    </div>
+
+    <p>
+        <b>Note</b>: This sample is using an experimental API that has not yet been standardized. As
+        of 2021-07-16, this API is available in Chrome M91 if the experimental code is enabled on
+        the command line with
+        <code>--enable-blink-features=MediaStreamInsertableStreams</code>.
+    </p>
+
+    <a href="https://github.com/webrtc/samples/tree/gh-pages/src/content/insertable-streams/video-processing-worker"
+       title="View source for this page on GitHub" id="viewSource">View source on GitHub</a>
+</div>
+
+<!-- stats.js (mrdoob) provides the FPS meter created in js/main.js. -->
+<script src="https://cdn.jsdelivr.net/npm/stats.js@0.17.0/build/stats.min.js"></script>
+<script src="js/main.js" async></script>
+
+</body>
+</html>
diff --git a/src/content/insertable-streams/video-processing-worker/js/main.js b/src/content/insertable-streams/video-processing-worker/js/main.js
new file mode 100644
index 000000000..205ee2aae
--- /dev/null
+++ b/src/content/insertable-streams/video-processing-worker/js/main.js
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree.
+ */
+
+'use strict';
+
+/* global MediaStreamTrackProcessor, MediaStreamTrackGenerator, Stats */
+if (typeof MediaStreamTrackProcessor === 'undefined' ||
+ typeof MediaStreamTrackGenerator === 'undefined') {
+ alert(
+ 'Your browser does not support the experimental MediaStreamTrack API ' +
+ 'for Insertable Streams of Media. See the note at the bottom of the ' +
+ 'page.');
+}
+
+const startButton = document.getElementById('startButton');
+const stopButton = document.getElementById('stopButton');
+const localVideo = document.getElementById('localVideo');
+const croppedVideo = document.getElementById('croppedVideo');
+const transformSelector = document.getElementById('transformSelector');
+
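+// FPS meter (stats.js) driven by requestVideoFrameCallback on the processed
+// video, so it counts frames actually delivered by the generator's track.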
+const stats = new Stats();
+stats.showPanel(0); // 0: fps, 1: ms, 2: mb, 3+: custom
+document.body.appendChild(stats.dom);
+const updateFPS = (now, metadata) => {
+ stats.update();
+ croppedVideo.requestVideoFrameCallback(updateFPS);
+};
+croppedVideo.requestVideoFrameCallback(updateFPS);
+
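+// All frame processing runs in a dedicated worker; the main thread only sets
+// up the capture, transfers the streams, and displays the two videos.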
+const worker = new Worker('./js/worker.js', {name: 'Video processing worker'});
+let stream = null;
+startButton.addEventListener('click', async () => {
+ stream = await navigator.mediaDevices.getUserMedia({audio: false, video: true});
+ localVideo.srcObject = stream;
+
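+  // "Breakout box": MediaStreamTrackProcessor exposes the camera track as a
+  // ReadableStream of VideoFrames, and MediaStreamTrackGenerator creates a
+  // new track from VideoFrames written to its WritableStream.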
+ const [track] = stream.getTracks();
+ const processor = new MediaStreamTrackProcessor({track});
+ const {readable} = processor;
+
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ const {writable} = generator;
+ croppedVideo.srcObject = new MediaStream([generator]);
+
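+  // Transfer the two streams to the worker (note the transfer list) so frames
+  // flow entirely worker-side instead of being copied via the main thread.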
+ worker.postMessage({
+ operation: 'start',
+ transformType: transformSelector.value,
+ readable,
+ writable,
+ }, [readable, writable]);
+ stopButton.disabled = false;
+ startButton.disabled = true;
+});
+
+stopButton.addEventListener('click', async () => {
+ localVideo.pause();
+ localVideo.srcObject = null;
+ croppedVideo.pause();
+ croppedVideo.srcObject = null;
+ if (stream) {
+ stream.getTracks().forEach(t => t.stop());
+ }
+ worker.postMessage({operation: 'stop'});
+ stopButton.disabled = true;
+ startButton.disabled = false;
+});
diff --git a/src/content/insertable-streams/video-processing-worker/js/worker.js b/src/content/insertable-streams/video-processing-worker/js/worker.js
new file mode 100644
index 000000000..878c5a0b3
--- /dev/null
+++ b/src/content/insertable-streams/video-processing-worker/js/worker.js
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree.
+ */
+'use strict';
+
+importScripts('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js');
+importScripts('../../video-processing/js/webgl-background-blur.js');
+importScripts('../../video-processing/js/webgpu-background-blur.js');
+importScripts('../../video-processing/js/canvas-transform.js');
+importScripts('../../video-processing/js/simple-transforms.js');
+importScripts('../../video-processing/js/webcodec-transform.js');
+importScripts('../../video-processing/js/webgl-transform.js');
+importScripts('../../video-processing/js/webnn-deeplabv3.js');
+importScripts('../../../../js/third_party/numpy.js');
+
+let frameTransform = null;
+
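+// Transformer for the TransformStream: each incoming VideoFrame is handed to
+// the selected transform, which enqueues processed frames on the controller.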
+async function transform(frame, controller) {
+  if (frameTransform) {
+    await frameTransform.transform(frame, controller);
+  } else {
+    // No transform active (not started yet, or already stopped): close the
+    // frame so the camera's frame pool is released.
+    frame.close();
+  }
+}
+
+onmessage = async (event) => {
+ const {operation, transformType} = event.data;
+ if (operation === 'start') {
+ switch (transformType) {
+ case 'webgl':
+ frameTransform = new WebGLTransform();
+ break;
+ case 'webgl-background-blur':
+ frameTransform = new WebGLBackgroundBlurTransform();
+ break;
+ case 'webgpu-background-blur':
+ frameTransform = new WebGPUBackgroundBlurTransform();
+ break;
+ case 'canvas2d':
+ frameTransform = new CanvasTransform();
+ break;
+ case 'drop':
+ // Defined in simple-transforms.js.
+ frameTransform = new DropTransform();
+ break;
+ case 'noop':
+ // Defined in simple-transforms.js.
+ frameTransform = new NullTransform();
+ break;
+ case 'delay':
+ // Defined in simple-transforms.js.
+ frameTransform = new DelayTransform();
+ break;
+ case 'webcodec':
+ // Defined in webcodec-transform.js
+ frameTransform = new WebCodecTransform();
+ break;
+      default:
+        throw new Error(`unknown transform ${transformType}`);
+ }
+    await frameTransform.init();
+ const {readable, writable} = event.data;
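+    // Wire the pipeline: camera frames -> transform -> generator's writable.
+    // pipeTo() settles only when the stream closes, so it is deliberately not
+    // awaited here.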
+ readable
+ .pipeThrough(new TransformStream({transform}))
+ .pipeTo(writable);
+  } else if (operation === 'stop') {
+    if (frameTransform) {
+      frameTransform.destroy();
+      frameTransform = null;
+    }
+ } else {
+ throw new Error(`unknown operation ${operation}`);
+ }
+};
diff --git a/src/content/insertable-streams/video-processing/js/webgl-background-blur.js b/src/content/insertable-streams/video-processing/js/webgl-background-blur.js
index 3c6a1c523..7441f8b53 100644
--- a/src/content/insertable-streams/video-processing/js/webgl-background-blur.js
+++ b/src/content/insertable-streams/video-processing/js/webgl-background-blur.js
@@ -49,8 +49,13 @@ class WebGLBackgroundBlurTransform { // eslint-disable-line no-unused-vars
// tfjs deeplab model for segmentation
this.deeplab_ = null;
- this.blurBackgroundCheckbox_ = (/** @type {!HTMLInputElement} */ (
- document.getElementById('segmentBackground')));
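+    // Detect whether we are running in a DedicatedWorker: DOM access (the
+    // segmentBackground checkbox) is only possible on the main thread.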
+ this.isWorker_ = typeof DedicatedWorkerGlobalScope !== 'undefined' &&
+ globalThis instanceof DedicatedWorkerGlobalScope;
+
+ if (!this.isWorker_) {
+ this.blurBackgroundCheckbox_ = (/** @type {!HTMLInputElement} */ (
+ document.getElementById('segmentBackground')));
+ }
}
/** @override */
async init() {
@@ -277,13 +282,18 @@ class WebGLBackgroundBlurTransform { // eslint-disable-line no-unused-vars
// Segmentation
- const isSegmentBackground = this.blurBackgroundCheckbox_.checked ? true : false;
+    const isSegmentBackground =
+        this.isWorker_ || this.blurBackgroundCheckbox_.checked;
let resultTensor;
let resultGPUData;
if (isSegmentBackground) {
if (!this.deeplab_) {
await tf.setBackend(customBackendName);
- this.deeplab_ = await tf.loadGraphModel('../../../tfjs-models/deeplab_pascal_1_default_1/model.json');
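+          // In a worker, relative URLs resolve against js/worker.js, one
+          // directory deeper than the sample page, so prepend an extra '../'.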
+ let modelUrl = '../../../models/deeplab_pascal_1_default_1/model.json';
+ if (this.isWorker_) {
+ modelUrl = '../' + modelUrl;
+ }
+ this.deeplab_ = await tf.loadGraphModel(modelUrl);
console.log('DeepLab model loaded', this.deeplab_);
}
const resizedVideoBitmap = await createImageBitmap(
diff --git a/src/content/insertable-streams/video-processing/js/webgpu-background-blur.js b/src/content/insertable-streams/video-processing/js/webgpu-background-blur.js
index 739c73b76..b9ff40cc5 100644
--- a/src/content/insertable-streams/video-processing/js/webgpu-background-blur.js
+++ b/src/content/insertable-streams/video-processing/js/webgpu-background-blur.js
@@ -211,10 +211,14 @@ const batch = [4, 4];
this.deeplab_ = null;
- this.blurBackgroundCheckbox_ = (/** @type {!HTMLInputElement} */ (
- document.getElementById('segmentBackground')));
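+    // Same worker detection as the WebGL blur transform: DOM-backed controls
+    // (the checkbox and the dat.GUI panel) only exist on the main thread.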
+ this.isWorker_ = typeof DedicatedWorkerGlobalScope !== 'undefined' &&
+ globalThis instanceof DedicatedWorkerGlobalScope;
+ if (!this.isWorker_) {
+ this.blurBackgroundCheckbox_ = (/** @type {!HTMLInputElement} */ (
+ document.getElementById('segmentBackground')));
- this.gui_ = null;
+ this.gui_ = null;
+ }
}
/** @override */
@@ -355,12 +359,15 @@ const batch = [4, 4];
new Uint32Array([settings.filterSize, blockDim])
);
};
- if (this.gui_) {
- this.gui_.destroy();
+
+ if (!this.isWorker_) {
+ if (this.gui_) {
+ this.gui_.destroy();
+ }
+ this.gui_ = new dat.GUI();
+ this.gui_.add(settings, 'filterSize', 1, 33).step(2).onChange(updateSettings);
+ this.gui_.add(settings, 'iterations', 1, 10).step(1);
}
- this.gui_ = new dat.GUI();
- this.gui_.add(settings, 'filterSize', 1, 33).step(2).onChange(updateSettings);
- this.gui_.add(settings, 'iterations', 1, 10).step(1);
updateSettings();
@@ -434,7 +441,8 @@ const batch = [4, 4];
return;
}
- const isSegmentBackground = this.blurBackgroundCheckbox_.checked ? true : false;
+    const isSegmentBackground =
+        this.isWorker_ || this.blurBackgroundCheckbox_.checked;
// Set output size to input size
const frameWidth = frame.displayWidth;
@@ -442,7 +450,7 @@ const batch = [4, 4];
if (canvas.width !== frameWidth || canvas.height !== frameHeight) {
canvas.width = frameWidth;
canvas.height = frameHeight;
- const devicePixelRatio = window.devicePixelRatio || 1;
+ const devicePixelRatio = this.isWorker_ ? 1 : (window.devicePixelRatio || 1);
const presentationSize = [
canvas.width * devicePixelRatio,
canvas.height * devicePixelRatio,
@@ -690,7 +698,9 @@ const batch = [4, 4];
this.device_ = null;
}
this.deeplab_ = null;
- this.gui_.destroy();
- this.gui_ = null;
+ if (!this.isWorker_) {
+ this.gui_.destroy();
+ this.gui_ = null;
+ }
}
}
diff --git a/src/content/insertable-streams/video-processing/js/webnn-deeplabv3.js b/src/content/insertable-streams/video-processing/js/webnn-deeplabv3.js
index 21604d9fc..7a7f84f6b 100644
--- a/src/content/insertable-streams/video-processing/js/webnn-deeplabv3.js
+++ b/src/content/insertable-streams/video-processing/js/webnn-deeplabv3.js
@@ -59,7 +59,12 @@ async function buildConstantByNpy(device, builder, url) {
// DeepLab V3 MobileNet V2 model with 'nchw' input layout
class DeepLabV3MNV2Nchw {
constructor() {
- this.weightsUrl_ = './models/deeplabv3_1_default_1_nchw/weights/';
+ this.weightsUrl_ = '../../../models/deeplabv3_1_default_1_nchw/weights/';
+ const isWorker = typeof DedicatedWorkerGlobalScope !== 'undefined' &&
+ globalThis instanceof DedicatedWorkerGlobalScope;
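+    // fetch() in a worker resolves these URLs against js/worker.js, one
+    // directory deeper than the sample page, hence the extra '../'.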
+ if (isWorker) {
+ this.weightsUrl_ = '../' + this.weightsUrl_;
+ }
// Shares the same bias files with 'nhwc' layout
this.biasUrl_ = this.weightsUrl_;
this.inputOptions = {