Add WebGPU-only background blur pipeline
huningxin committed Apr 28, 2022
1 parent add81ff commit 33b12ca
Showing 6 changed files with 118 additions and 77 deletions.
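
The previous combined "WebGPU/WebNN background blur" option is split into two selectable pipelines: a WebGPU-only path that segments with the TF.js WebGPU backend, and a WebNN/WebGPU path that keeps the existing DeepLabV3MNV2Nchw model. Both run through the same transform class; a constructor flag chooses the segmentation backend. A minimal sketch of the new contract (names as in the diff below):

    // One class, two pipelines; the boolean selects the segmentation backend.
    const transform = useWebNN
        ? new WebGPUBackgroundBlurTransform(true)  // WebNN segmentation
        : new WebGPUBackgroundBlurTransform();     // TF.js WebGPU backend (default)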
@@ -47,7 +47,8 @@ <h1><a href="//webrtc.github.io/samples/" title="WebRTC samples homepage">WebRTC
<span>Transform:</span>
<select id="transformSelector">
<option selected value="webgl-background-blur">WebGL background blur</option>
<option value="webgpu-background-blur">WebGPU/WebNN background blur</option>
<option value="webgpu-background-blur">WebGPU background blur</option>
<option value="webnn-webgpu-background-blur">WebNN/WebGPU background blur</option>
<option value="webgl">WebGL wrap</option>
<option value="canvas2d">Canvas2D</option>
<option value="noop">Do nothing</option>
@@ -6,6 +6,7 @@
* tree.
*/
importScripts('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js')
importScripts('../../../../js/third_party/tfjs/tf-backend-webgpu.js');
importScripts('../../video-processing/js/webgl-background-blur.js');
importScripts('../../video-processing/js/webgpu-background-blur.js');
importScripts('../../video-processing/js/canvas-transform.js');
@@ -38,6 +39,9 @@ onmessage = async (event) => {
case 'webgpu-background-blur':
frameTransform = new WebGPUBackgroundBlurTransform();
break;
case 'webnn-webgpu-background-blur':
frameTransform = new WebGPUBackgroundBlurTransform(true);
break;
case 'canvas2d':
frameTransform = new CanvasTransform();
break;
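Import order matters in the worker: tf.min.js and the tf-backend-webgpu script must both be loaded before the transform calls tf.setBackend('webgpu'), because the backend script is what registers the 'webgpu' factory with the TF.js engine. A minimal sketch, using the same paths as the diff:

    importScripts('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js');
    importScripts('../../../../js/third_party/tfjs/tf-backend-webgpu.js'); // registers 'webgpu'
    // Later, inside the transform's init():
    tf.setBackend('webgpu').then((ok) => console.log('webgpu backend ready:', ok));
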
6 changes: 4 additions & 2 deletions src/content/insertable-streams/video-processing/index.html
@@ -58,8 +58,9 @@ <h1><a href="//webrtc.github.io/samples/" title="WebRTC samples homepage">WebRTC
<div class="box">
<span>Transform:</span>
<select id="transformSelector" disabled>
<option selected value="webgl-background-blur">WebGL blur</option>
<option value="webgpu-background-blur">WebGPU blur</option>
<option selected value="webgl-background-blur">WebGL background blur</option>
<option value="webgpu-background-blur">WebGPU background blur</option>
<option value="webnn-webgpu-background-blur">WebNN/WebGPU background blur</option>
<option value="webgl">WebGL wrap</option>
<option value="canvas2d">Canvas2D</option>
<option value="noop">Do nothing</option>
@@ -89,6 +90,7 @@ <h1><a href="//webrtc.github.io/samples/" title="WebRTC samples homepage">WebRTC
</div>

<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"></script>
<script src="../../../js/third_party/tfjs/tf-backend-webgpu.js"></script>

<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="js/camera-source.js" async></script>
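The same backend script is added to the non-worker page. With the factory registered, tf.setBackend('webgpu') should resolve to false rather than throwing when WebGPU itself is unavailable, so a guarded fallback is possible; the sketch below assumes a WebGL fallback, which is not part of this commit:

    tf.setBackend('webgpu').then(async (ok) => {
      if (!ok) {
        console.warn('TF.js WebGPU backend unavailable; falling back to WebGL.');
        await tf.setBackend('webgl');
      }
      await tf.ready();
    });
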
10 changes: 3 additions & 7 deletions src/content/insertable-streams/video-processing/js/main.js
@@ -218,13 +218,6 @@ function initUI() {
transformSelector.options[transformSelector.selectedIndex].value;
if (transformType.indexOf('blur') !== -1) {
segmentBackgroundSpan.style.display = "inline";
const segmentBackend = (/** @type {!HTMLElement} */ (
document.getElementById('segmentBackend')));
if (transformType === 'webgl-background-blur') {
segmentBackend.innerHTML = 'Segment background by TF.js WebGL backend: ';
} else if (transformType === 'webgpu-background-blur') {
segmentBackend.innerHTML = 'Segment background by WebNN: ';
}
} else {
segmentBackgroundSpan.style.display = "none";
}
@@ -244,6 +237,9 @@ function initUI() {
case 'webgpu-background-blur':
pipeline.updateTransform(new WebGPUBackgroundBlurTransform());
break;
case 'webnn-webgpu-background-blur':
pipeline.updateTransform(new WebGPUBackgroundBlurTransform(true));
break;
case 'canvas2d':
pipeline.updateTransform(new CanvasTransform());
break;
@@ -190,7 +190,7 @@ const batch = [4, 4];
* @implements {FrameTransform} in pipeline.js
*/
class WebGPUBackgroundBlurTransform { // eslint-disable-line no-unused-vars
constructor() {
constructor(useWebNN = false) {
// All fields are initialized in init()
/** @private {?OffscreenCanvas} canvas used to render video frame */
this.canvas_ = null;
@@ -210,6 +210,7 @@ const batch = [4, 4];
this.segmapBuffer_ = null;

this.deeplab_ = null;
this.useWebNN_ = useWebNN;
this.hasWebNN_ = true; // will check WebNN when init deeplab

this.isWorker_ = typeof DedicatedWorkerGlobalScope !== 'undefined' &&
@@ -237,7 +238,8 @@ const batch = [4, 4];
}
const adapter = await navigator.gpu.requestAdapter();
this.adapter_ = adapter;
const device = await adapter.requestDevice();
await tf.setBackend('webgpu');
const device = tf.engine().backendInstance.device;
if (!device) {
throw new Error('Failed to create GPUDevice.');
}
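
This is the key wiring of the new pipeline: rather than requesting its own GPUDevice, the transform adopts the device created by the TF.js WebGPU backend, so segmentation tensors and the blur compute passes share one device and the segmentation map can later be bound as a GPUBuffer with no CPU readback. Note that backendInstance.device is an internal tfjs-backend-webgpu property, not a stable public API. A condensed sketch:

    await tf.setBackend('webgpu');
    await tf.ready();                             // ensure the backend finished initializing
    const backend = tf.engine().backendInstance;  // internal WebGPUBackend instance
    const device = backend.device;                // GPUDevice shared with TF.js
    if (!device) {
      throw new Error('Failed to create GPUDevice.');
    }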
@@ -468,6 +470,89 @@ const batch = [4, 4];
});
this.initResources_(frameWidth, frameHeight);
}

let resultTensor = null;
let segmapBuffer = null;
if (isSegmentBackground) {
if (!this.deeplab_) {
if (this.useWebNN_) {
this.deeplab_ = new DeepLabV3MNV2Nchw()
this.hasWebNN_ = await this.deeplab_.init(this.device_);
if (!this.hasWebNN_) {
this.deeplab_ = null;
if (!this.isWorker_) {
this.blurBackgroundCheckbox_.checked = false;
}
}
} else {
let modelUrl = '../../../models/deeplab_pascal_1_default_1/model.json';
if (this.isWorker_) {
modelUrl = '../' + modelUrl;
}
this.deeplab_ = await tf.loadGraphModel(modelUrl);
console.log('DeepLab model loaded', this.deeplab_);
}
}

const resizedVideoBitmap = await createImageBitmap(
frame, {resizeWidth: this.segmentationWidth_, resizeHeight: this.segmentationHeight_});

if (this.useWebNN_) {
device.queue.copyExternalImageToTexture(
{ source: resizedVideoBitmap },
{ texture: this.segmentationInputTexture_ },
[this.segmentationWidth_, this.segmentationHeight_]
);

const preprocessBindGroup = device.createBindGroup({
layout: this.preprocessPipeline_.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: this.sampler_,
},
{
binding: 1,
resource: {
buffer: this.inputTensorBuffer_,
},
},
{
binding: 2,
resource: this.segmentationInputTexture_.createView(),
},
],
});

const commandEncoder = device.createCommandEncoder();
const computePass = commandEncoder.beginComputePass();
computePass.setPipeline(this.preprocessPipeline_);
computePass.setBindGroup(0, preprocessBindGroup);
computePass.dispatch(
Math.ceil(this.segmentationWidth_ / 8),
Math.ceil(this.segmentationHeight_ / 8)
);

computePass.end();
device.queue.submit([commandEncoder.finish()]);

this.deeplab_.compute(this.inputTensorBuffer_, this.segmapBuffer_);
segmapBuffer = this.segmapBuffer_;
} else {
// use TF.js WebGPU backend
resultTensor = tf.tidy(() => {
let inputTensor = tf.browser.fromPixels(resizedVideoBitmap);
const inputShape = inputTensor.shape;
inputShape.unshift(1);
inputTensor = inputTensor.reshape(inputShape);
return this.deeplab_.predict(inputTensor);
});
segmapBuffer = tf.engine().backendInstance.getBuffer(resultTensor.dataId);
}
resizedVideoBitmap.close();
}

// Upload video frame to texture
const videoBitmap = await createImageBitmap(frame);
device.queue.copyExternalImageToTexture(
{ source: videoBitmap },
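
In the TF.js path, DeepLab expects a batched input: tf.browser.fromPixels() returns a rank-3 [height, width, 3] tensor, so the code prepends a batch dimension before predict(). The result never leaves the GPU; getBuffer(dataId) is an internal tfjs-backend-webgpu accessor that exposes the tensor's backing GPUBuffer so the segmentation pass can bind it directly. Condensed:

    const resultTensor = tf.tidy(() => {
      const pixels = tf.browser.fromPixels(resizedVideoBitmap);            // [h, w, 3]
      return this.deeplab_.predict(pixels.reshape([1, ...pixels.shape]));  // [1, h, w, 3] in
    });
    // Internal API: the GPUBuffer backing the result tensor, no CPU readback.
    segmapBuffer = tf.engine().backendInstance.getBuffer(resultTensor.dataId);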
@@ -477,6 +562,7 @@ const batch = [4, 4];
videoBitmap.close();
const externalResource = this.cubeTexture_.createView();

// Blur
const blurBindGroup0 = device.createBindGroup({
layout: this.blurPipeline_.getBindGroupLayout(1),
entries: [
@@ -537,9 +623,8 @@ const batch = [4, 4];
],
});

let commandEncoder = device.createCommandEncoder();

let computePass = commandEncoder.beginComputePass();
const commandEncoder = device.createCommandEncoder();
const computePass = commandEncoder.beginComputePass();
computePass.setPipeline(this.blurPipeline_);
computePass.setBindGroup(0, this.computeConstants_);

@@ -570,47 +655,7 @@ const batch = [4, 4];
);
}

if (isSegmentBackground && !this.deeplab_) {
this.deeplab_ = new DeepLabV3MNV2Nchw()
this.hasWebNN_ = await this.deeplab_.init(this.device_);
if (!this.hasWebNN_) {
this.deeplab_ = null;
if (!this.isWorker_) {
this.blurBackgroundCheckbox_.checked = false;
}
}
}

if (isSegmentBackground && this.deeplab_) {
const resizedVideoBitmap = await createImageBitmap(
frame, {resizeWidth: this.segmentationWidth_, resizeHeight: this.segmentationHeight_});
device.queue.copyExternalImageToTexture(
{ source: resizedVideoBitmap },
{ texture: this.segmentationInputTexture_ },
[this.segmentationWidth_, this.segmentationHeight_]
);
resizedVideoBitmap.close();

const preprocessBindGroup = device.createBindGroup({
layout: this.preprocessPipeline_.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: this.sampler_,
},
{
binding: 1,
resource: {
buffer: this.inputTensorBuffer_,
},
},
{
binding: 2,
resource: this.segmentationInputTexture_.createView(),
},
],
});

if (isSegmentBackground) {
const segmentationBindBroup = device.createBindGroup({
layout: this.segmentationPipeline_.getBindGroupLayout(0),
entries: [
@@ -621,7 +666,7 @@ const batch = [4, 4];
{
binding: 1,
resource: {
buffer: this.segmapBuffer_,
buffer: segmapBuffer,
},
},
{
@@ -639,20 +684,6 @@ const batch = [4, 4];
],
});

computePass.setPipeline(this.preprocessPipeline_);
computePass.setBindGroup(0, preprocessBindGroup);
computePass.dispatch(
Math.ceil(this.segmentationWidth_ / 8),
Math.ceil(this.segmentationHeight_ / 8)
);

computePass.end();
device.queue.submit([commandEncoder.finish()]);

this.deeplab_.compute(this.inputTensorBuffer_, this.segmapBuffer_);

commandEncoder = device.createCommandEncoder();
computePass = commandEncoder.beginComputePass();
computePass.setPipeline(this.segmentationPipeline_);
computePass.setBindGroup(0, segmentationBindBroup);
computePass.dispatch(
@@ -696,6 +727,10 @@ const batch = [4, 4];

await device.queue.onSubmittedWorkDone();

if (resultTensor) {
resultTensor.dispose();
}

// Create a video frame from canvas and enqueue it to controller
// alpha: 'discard' is needed in order to send frames to a PeerConnection.
frame.close();
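
resultTensor.dispose() matters here: predict() allocates a fresh output tensor every frame, and only the WebNN path writes into a preallocated buffer. Without the dispose call, the transform would leak one tensor (and its GPUBuffer) per processed frame. tf.memory() is a cheap way to confirm the tensor count stays flat across frames:

    // After a few frames, numTensors should be stable if nothing leaks.
    console.log('TF.js tensors alive:', tf.memory().numTensors);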
@@ -704,15 +739,15 @@ const batch = [4, 4];

/** @override */
destroy() {
if (this.device_) {
console.log('[WebGPUBackgroundBlurTransform] Destroy WebGPU device.');
this.device_.destroy();
this.device_ = null;
if (this.deeplab_) {
this.deeplab_.dispose();
}
this.deeplab_ = null;
if (!this.isWorker_) {
this.gui_.destroy();
this.gui_ = null;
if (this.gui_) {
this.gui_.destroy();
}
}
this.gui_ = null;
}
}
@@ -204,4 +204,7 @@ class DeepLabV3MNV2Nchw {
async compute(inputGPUBuffer, outputGPUBuffer) {
this.graph_.compute({'input': {resource: inputGPUBuffer}}, {'output': {resource: outputGPUBuffer}});
}

dispose() {
}
}
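
The empty dispose() gives DeepLabV3MNV2Nchw the same surface as tf.GraphModel, whose dispose() frees the model's weight tensors, so destroy() can call this.deeplab_.dispose() without checking which backend produced the model; releasing the WebNN graph's resources is left as a no-op stub here. The uniform call site, as in the destroy() change above:

    if (this.deeplab_) {
      this.deeplab_.dispose(); // GraphModel frees weights; the WebNN wrapper does nothing
    }
    this.deeplab_ = null;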
