
Commit

JS code runnable in both Node and Chrome, minimizing platform-specific code. Works.
John Owens committed Nov 1, 2024
1 parent c0f3b94 commit 83d5fd1
Showing 4 changed files with 202 additions and 0 deletions.
14 changes: 14 additions & 0 deletions both_webgpu/both.html
@@ -0,0 +1,14 @@
<!DOCTYPE html>

<html>
<head>
<meta charset="utf-8" />
<title>Supporting Both Chrome And Node Sandbox</title>
</head>

<body>
<div id="plot"></div>
<script src="http://localhost:8000/webgpu-sandbox/both_webgpu/both_chrome.mjs" type="module">
</script>
</body>
</html>
157 changes: 157 additions & 0 deletions both_webgpu/both.mjs
@@ -0,0 +1,157 @@
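// Platform-agnostic WebGPU code. main() takes a navigator object as an
// argument so the same code can be driven by the browser's global navigator
// (both_chrome.mjs) or by a Dawn-backed shim in Node (both_node.mjs).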
async function main(navigator) {
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
throw new Error("Fatal error: No WebGPU adapter available.");
}
const hasSubgroups = adapter.features.has("subgroups");
const canTimestamp = adapter.features.has("timestamp-query");
const device = await adapter.requestDevice({
requiredFeatures: [
...(canTimestamp ? ["timestamp-query"] : []),
...(hasSubgroups ? ["subgroups"] : []),
],
});

if (!device) {
throw new Error("Fatal error: Device does not support WebGPU.");
}
console.log("I am main! (WebGPU)");

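// Problem size: 2**24 32-bit elements, processed in workgroups of 64 invocations.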
const workgroupSize = 64;
const memsrcSize = 2 ** 24;

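// A single dispatch dimension is capped at maxComputeWorkgroupsPerDimension,
// so fold the workgroup count into a 2D dispatch: halve x and double y until x fits.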
const workgroupCount = memsrcSize / workgroupSize;
const dispatchGeometry = [workgroupCount, 1];
while (
dispatchGeometry[0] > adapter.limits.maxComputeWorkgroupsPerDimension
) {
dispatchGeometry[0] /= 2;
dispatchGeometry[1] *= 2;
}
console.log(`workgroup count: ${workgroupCount}
workgroup size: ${workgroupSize}
maxComputeWGPerDim: ${adapter.limits.maxComputeWorkgroupsPerDimension}
dispatchGeometry: ${dispatchGeometry}`);

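// Fill the source buffer with its own indices so the output is easy to verify.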
const memsrc = new Uint32Array(memsrcSize);
for (let i = 0; i < memsrc.length; i++) {
memsrc[i] = i;
}

const memcpyModule = device.createShaderModule({
label: "copy large chunk of memory from memSrc to memDest",
code: /* wgsl */ `
/* output */
@group(0) @binding(0) var<storage, read_write> memDest: array<u32>;
/* input */
@group(0) @binding(1) var<storage, read> memSrc: array<u32>;
@compute @workgroup_size(${workgroupSize}) fn memcpyKernel(
@builtin(global_invocation_id) id: vec3u,
@builtin(num_workgroups) nwg: vec3u,
@builtin(workgroup_id) wgid: vec3u) {
let i = id.y * nwg.x * ${workgroupSize} + id.x;
memDest[i] = memSrc[i] + 1;
}
`,
});

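// layout: "auto" lets WebGPU derive the bind group layout from the bindings declared in the shader.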
const memcpyPipeline = device.createComputePipeline({
label: "memcpy compute pipeline",
layout: "auto",
compute: {
module: memcpyModule,
},
});

// create buffers on the GPU to hold data
// read-only inputs:
const memsrcBuffer = device.createBuffer({
label: "memory source buffer",
size: memsrc.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(memsrcBuffer, 0, memsrc);

const memdestBuffer = device.createBuffer({
label: "memory destination buffer",
size: memsrc.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});

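// A STORAGE buffer can't also be MAP_READ, so results are staged through a
// separate MAP_READ | COPY_DST buffer for readback on the host.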
const mappableMemdstBuffer = device.createBuffer({
label: "mappable memory destination buffer",
size: memsrc.byteLength,
usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
});

/** Set up bindGroups per compute kernel to tell the shader which buffers to use */
const memcpyBindGroup = device.createBindGroup({
label: "bindGroup for memcpy kernel",
layout: memcpyPipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: memdestBuffer } },
{ binding: 1, resource: { buffer: memsrcBuffer } },
],
});

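// Record the compute pass and the result copy into a single command buffer.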
const encoder = device.createCommandEncoder({
label: "memcpy encoder",
});

const memcpyPass = encoder.beginComputePass({
label: "memcpy compute pass",
});
memcpyPass.setPipeline(memcpyPipeline);
memcpyPass.setBindGroup(0, memcpyBindGroup);
// TODO handle not evenly divisible by wgSize
memcpyPass.dispatchWorkgroups(...dispatchGeometry);
memcpyPass.end();

// Encode a command to copy the results to a mappable buffer.
// arguments are (source, sourceOffset, destination, destinationOffset, size)
encoder.copyBufferToBuffer(
memdestBuffer,
0,
mappableMemdstBuffer,
0,
mappableMemdstBuffer.size
);

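// Rough host-side timing: drain the queue, submit, then wait for completion.
// This measures wall-clock time around the submission, not GPU execution time alone.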
// Finish encoding and submit the commands
const commandBuffer = encoder.finish();
await device.queue.onSubmittedWorkDone();
const passStartTime = performance.now();
device.queue.submit([commandBuffer]);
await device.queue.onSubmittedWorkDone();
const passEndTime = performance.now();

// Read the results
await mappableMemdstBuffer.mapAsync(GPUMapMode.READ);
const memdest = new Uint32Array(
mappableMemdstBuffer.getMappedRange().slice()
);
mappableMemdstBuffer.unmap();
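// Verify: every output element should equal the corresponding input plus one.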
let errors = 0;
for (let i = 0; i < memdest.length; i++) {
if (memsrc[i] + 1 != memdest[i]) {
if (errors < 5) {
console.log(
`Error ${errors}: i=${i}, src=${memsrc[i]}, dest=${memdest[i]}`
);
}
errors++;
}
}
if (errors > 0) {
console.log(`Memdest size: ${memdest.length} | Errors: ${errors}`);
} else {
console.log(`Memdest size: ${memdest.length} | No errors!`);
}

let bytesTransferred = 2 * memdest.byteLength;
// performance.now() returns milliseconds; convert to nanoseconds so that
// bytes / ns comes out in GB/s.
let ns = (passEndTime - passStartTime) * 1e6;
console.log(
`Timing result: ${ns} ns; transferred ${bytesTransferred} bytes; bandwidth = ${
bytesTransferred / ns
} GB/s`
);
}
export { main };
9 changes: 9 additions & 0 deletions both_webgpu/both_chrome.mjs
@@ -0,0 +1,9 @@
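// Browser entry point: import the shared main() over HTTP and hand it the
// browser's built-in navigator, which exposes navigator.gpu in Chrome.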
import { main } from "http://localhost:8000/webgpu-sandbox/both_webgpu/both.mjs";
if (typeof process !== "undefined" && process.release.name === "node") {
// running in Node
throw new Error("Use this only from a web browser."); // alert() is not available in Node
} else {
// running in browser
}

main(navigator);
22 changes: 22 additions & 0 deletions both_webgpu/both_node.mjs
@@ -0,0 +1,22 @@
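// Node entry point: wrap the Dawn native addon in a navigator-like object so
// the shared main() sees the same interface it gets in the browser.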
"use strict";

// Loading a .node file into a recent version of Node is ... challenging
// https://stackoverflow.com/questions/77913169/loading-native-node-addons-from-es-module
import { createRequire } from "module";
const require = createRequire(import.meta.url);
const dawn = require("../../../src/dawn-build/dawn.node");
Object.assign(globalThis, dawn.globals); // Provides constants like GPUBufferUsage.MAP_READ

let navigator = {
gpu: dawn.create(["enable-dawn-features=use_user_defined_labels_in_backend"]),
};

import { main } from "./both.mjs";
if (typeof process !== "undefined" && process.release.name === "node") {
// running in Node
} else {
// running in browser
alert("Use this only in Node.");
}

main(navigator);
