diff --git a/meshes/stanfordDragon.ts b/meshes/stanfordDragon.ts
index f4d79cba..12b601b5 100644
--- a/meshes/stanfordDragon.ts
+++ b/meshes/stanfordDragon.ts
@@ -1,42 +1,44 @@
import dragonRawData from './stanfordDragonData';
-import { computeSurfaceNormals, computeProjectedPlaneUVs } from './utils';
+import { computeProjectedPlaneUVs, generateNormals } from './utils';
-export const mesh = {
- positions: dragonRawData.positions as [number, number, number][],
- triangles: dragonRawData.cells as [number, number, number][],
- normals: [] as [number, number, number][],
- uvs: [] as [number, number][],
-};
-
-// Compute surface normals
-mesh.normals = computeSurfaceNormals(mesh.positions, mesh.triangles);
+const { positions, normals, triangles } = generateNormals(
+ Math.PI,
+ dragonRawData.positions as [number, number, number][],
+ dragonRawData.cells as [number, number, number][]
+);
-// Compute some easy uvs for testing
-mesh.uvs = computeProjectedPlaneUVs(mesh.positions, 'xy');
+const uvs = computeProjectedPlaneUVs(positions, 'xy');
// Push indices for an additional ground plane
-mesh.triangles.push(
- [mesh.positions.length, mesh.positions.length + 2, mesh.positions.length + 1],
- [mesh.positions.length, mesh.positions.length + 1, mesh.positions.length + 3]
+triangles.push(
+ [positions.length, positions.length + 2, positions.length + 1],
+ [positions.length, positions.length + 1, positions.length + 3]
);
// Push vertex attributes for an additional ground plane
// prettier-ignore
-mesh.positions.push(
+positions.push(
[-100, 20, -100], //
[ 100, 20, 100], //
[-100, 20, 100], //
[ 100, 20, -100]
);
-mesh.normals.push(
+normals.push(
[0, 1, 0], //
[0, 1, 0], //
[0, 1, 0], //
[0, 1, 0]
);
-mesh.uvs.push(
+uvs.push(
[0, 0], //
[1, 1], //
[0, 1], //
[1, 0]
);
+
+export const mesh = {
+ positions,
+ triangles,
+ normals,
+ uvs,
+};
diff --git a/meshes/utils.ts b/meshes/utils.ts
index 05f135df..f7210393 100644
--- a/meshes/utils.ts
+++ b/meshes/utils.ts
@@ -1,4 +1,4 @@
-import { vec3 } from 'wgpu-matrix';
+import { vec3, Vec3 } from 'wgpu-matrix';
export function computeSurfaceNormals(
positions: [number, number, number][],
@@ -33,6 +33,170 @@ export function computeSurfaceNormals(
return normals;
}
+function makeTriangleIndicesFn(triangles: [number, number, number][]) {
+ let triNdx = 0;
+ let vNdx = 0;
+ const fn = function () {
+ const ndx = triangles[triNdx][vNdx++];
+ if (vNdx === 3) {
+ vNdx = 0;
+ ++triNdx;
+ }
+ return ndx;
+ };
+ fn.reset = function () {
+ triNdx = 0;
+ vNdx = 0;
+ };
+ fn.numElements = triangles.length * 3;
+ return fn;
+}
+
+// adapted from: https://webglfundamentals.org/webgl/lessons/webgl-3d-geometry-lathe.html
+export function generateNormals(
+ maxAngle: number,
+ positions: [number, number, number][],
+ triangles: [number, number, number][]
+) {
+ // first compute the normal of each face
+ const getNextIndex = makeTriangleIndicesFn(triangles);
+ const numFaceVerts = getNextIndex.numElements;
+ const numVerts = positions.length;
+ const numFaces = numFaceVerts / 3;
+ const faceNormals: Vec3[] = [];
+
+ // Compute the normal for every face.
+ // While doing that, create a new vertex for every face vertex
+ for (let i = 0; i < numFaces; ++i) {
+ const n1 = getNextIndex();
+ const n2 = getNextIndex();
+ const n3 = getNextIndex();
+
+ const v1 = positions[n1];
+ const v2 = positions[n2];
+ const v3 = positions[n3];
+
+ faceNormals.push(
+ vec3.normalize(vec3.cross(vec3.subtract(v2, v1), vec3.subtract(v3, v1)))
+ );
+ }
+
+ let tempVerts: { [key: string]: number } = {};
+ let tempVertNdx = 0;
+
+ // this assumes vertex positions are an exact match
+
+ function getVertIndex(vert: [number, number, number]): number {
+ const vertId = JSON.stringify(vert);
+ const ndx = tempVerts[vertId];
+ if (ndx !== undefined) {
+ return ndx;
+ }
+ const newNdx = tempVertNdx++;
+ tempVerts[vertId] = newNdx;
+ return newNdx;
+ }
+
+ // We need to figure out the shared vertices.
+ // It's not as simple as looking at the faces (triangles)
+ // because for example if we have a standard cylinder
+ //
+ //
+ // 3-4
+ // / \
+ // 2 5 Looking down a cylinder starting at S
+ // | | and going around to E, E and S are not
+ // 1 6 the same vertex in the data we have
+ // \ / as they don't share UV coords.
+ // S/E
+ //
+ // the vertices at the start and end do not share vertices
+ // since they have different UVs but if you don't consider
+ // them to share vertices they will get the wrong normals
+
+ const vertIndices: number[] = [];
+ for (let i = 0; i < numVerts; ++i) {
+ const vert = positions[i];
+ vertIndices.push(getVertIndex(vert));
+ }
+
+ // go through every vertex and record which faces it's on
+ const vertFaces: number[][] = [];
+ getNextIndex.reset();
+ for (let i = 0; i < numFaces; ++i) {
+ for (let j = 0; j < 3; ++j) {
+ const ndx = getNextIndex();
+ const sharedNdx = vertIndices[ndx];
+ let faces = vertFaces[sharedNdx];
+ if (!faces) {
+ faces = [];
+ vertFaces[sharedNdx] = faces;
+ }
+ faces.push(i);
+ }
+ }
+
+ // now go through every face and compute the normals for each
+ // vertex of the face. Only include faces that aren't more than
+ // maxAngle different. Add the result to arrays of newPositions,
+ // newTexcoords and newNormals, discarding any vertices that
+ // are the same.
+ tempVerts = {};
+ tempVertNdx = 0;
+ const newPositions: [number, number, number][] = [];
+ const newNormals: [number, number, number][] = [];
+
+ function getNewVertIndex(
+ position: [number, number, number],
+ normal: [number, number, number]
+ ) {
+ const vertId = JSON.stringify({ position, normal });
+ const ndx = tempVerts[vertId];
+ if (ndx !== undefined) {
+ return ndx;
+ }
+ const newNdx = tempVertNdx++;
+ tempVerts[vertId] = newNdx;
+ newPositions.push(position);
+ newNormals.push(normal);
+ return newNdx;
+ }
+
+ const newTriangles: [number, number, number][] = [];
+ getNextIndex.reset();
+ const maxAngleCos = Math.cos(maxAngle);
+ // for each face
+ for (let i = 0; i < numFaces; ++i) {
+ // get the normal for this face
+ const thisFaceNormal = faceNormals[i];
+ // for each vertex on the face
+ const newTriangle: number[] = [];
+ for (let j = 0; j < 3; ++j) {
+ const ndx = getNextIndex();
+ const sharedNdx = vertIndices[ndx];
+ const faces = vertFaces[sharedNdx];
+ const norm = [0, 0, 0] as [number, number, number];
+ faces.forEach((faceNdx: number) => {
+ // is this face facing the same way
+ const otherFaceNormal = faceNormals[faceNdx];
+ const dot = vec3.dot(thisFaceNormal, otherFaceNormal);
+ if (dot > maxAngleCos) {
+ vec3.add(norm, otherFaceNormal, norm);
+ }
+ });
+ vec3.normalize(norm, norm);
+ newTriangle.push(getNewVertIndex(positions[ndx], norm));
+ }
+ newTriangles.push(newTriangle as [number, number, number]);
+ }
+
+ return {
+ positions: newPositions,
+ normals: newNormals,
+ triangles: newTriangles,
+ };
+}
+
type ProjectedPlane = 'xy' | 'xz' | 'yz';
const projectedPlane2Ids: { [key in ProjectedPlane]: [number, number] } = {
diff --git a/sample/multipleCanvases/index.html b/sample/multipleCanvases/index.html
new file mode 100644
index 00000000..0fd68da8
--- /dev/null
+++ b/sample/multipleCanvases/index.html
@@ -0,0 +1,47 @@
+<!-- NOTE(review): markup was stripped by extraction; reconstructed from the classes main.ts uses — verify against upstream -->
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <meta
+      name="viewport"
+      content="width=device-width, initial-scale=1, user-scalable=no"
+    />
+    <title>webgpu-samples: multiple canvases</title>
+    <style>
+      :root {
+        color-scheme: light dark;
+      }
+      html,
+      body {
+        margin: 0;
+      }
+      #outer {
+        display: flex;
+        flex-wrap: wrap;
+        justify-content: space-evenly;
+      }
+      .product {
+        display: flex;
+        flex-direction: column;
+        align-items: center;
+        margin: 1em;
+      }
+      .product canvas {
+        display: block;
+      }
+      .size0 canvas { width: 128px; height: 128px; }
+      .size1 canvas { width: 256px; height: 128px; }
+      .size2 canvas { width: 128px; height: 256px; }
+      .size3 canvas { width: 256px; height: 256px; }
+    </style>
+  </head>
+  <body>
+    <div id="outer"></div>
+    <script type="module" src="./main.ts"></script>
+  </body>
+</html>
diff --git a/sample/multipleCanvases/main.ts b/sample/multipleCanvases/main.ts
new file mode 100644
index 00000000..45a1f542
--- /dev/null
+++ b/sample/multipleCanvases/main.ts
@@ -0,0 +1,360 @@
+/* eslint-disable prettier/prettier */
+import { mat4, mat3 } from 'wgpu-matrix';
+import { modelData } from './models';
+
+type TypedArrayView = Float32Array | Uint32Array;
+
+function createBufferWithData(
+ device: GPUDevice,
+ data: TypedArrayView,
+ usage: number
+) {
+ const buffer = device.createBuffer({
+ size: data.byteLength,
+ usage: usage,
+ });
+ device.queue.writeBuffer(buffer, 0, data);
+ return buffer;
+}
+
+type Model = {
+ vertexBuffer: GPUBuffer;
+ indexBuffer: GPUBuffer;
+ indexFormat: GPUIndexFormat;
+ vertexCount: number;
+};
+
+function createVertexAndIndexBuffer(
+ device: GPUDevice,
+ { vertices, indices }: { vertices: Float32Array, indices: Uint32Array },
+): Model {
+ const vertexBuffer = createBufferWithData(
+ device,
+ vertices,
+ GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
+ );
+ const indexBuffer = createBufferWithData(
+ device,
+ indices,
+ GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
+ );
+ return {
+ vertexBuffer,
+ indexBuffer,
+ indexFormat: 'uint32',
+ vertexCount: indices.length,
+ };
+}
+
+const adapter = await navigator.gpu.requestAdapter();
+const device = await adapter.requestDevice();
+
+const models = Object.values(modelData).map(data => createVertexAndIndexBuffer(device, data));
+
+function rand(min?: number, max?: number) {
+ if (min === undefined) {
+ max = 1;
+ min = 0;
+ } else if (max === undefined) {
+ max = min;
+ min = 0;
+ }
+ return Math.random() * (max - min) + min;
+}
+
+function randInt(min: number, max?: number) {
+ return Math.floor(rand(min, max));
+}
+
+function randColor() {
+ return [rand(), rand(), rand(), 1];
+}
+
+
+const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
+const depthFormat = 'depth24plus';
+
+const module = device.createShaderModule({
+ code: `
+ struct Uniforms {
+ worldViewProjectionMatrix: mat4x4f,
+ worldMatrix: mat4x4f,
+ color: vec4f,
+ };
+
+ struct Vertex {
+ @location(0) position: vec4f,
+ @location(1) normal: vec3f,
+ };
+
+ struct VSOut {
+ @builtin(position) position: vec4f,
+ @location(0) normal: vec3f,
+ };
+
+ @group(0) @binding(0) var<uniform> uni: Uniforms;
+
+ @vertex fn vs(vin: Vertex) -> VSOut {
+ var vOut: VSOut;
+ vOut.position = uni.worldViewProjectionMatrix * vin.position;
+ vOut.normal = (uni.worldMatrix * vec4f(vin.normal, 0)).xyz;
+ return vOut;
+ }
+
+ @fragment fn fs(vin: VSOut) -> @location(0) vec4f {
+ let lightDirection = normalize(vec3f(4, 10, 6));
+ let light = dot(normalize(vin.normal), lightDirection) * 0.5 + 0.5;
+ return vec4f(uni.color.rgb * light, uni.color.a);
+ }
+ `,
+});
+
+const pipeline = device.createRenderPipeline({
+ label: 'our hardcoded red triangle pipeline',
+ layout: 'auto',
+ vertex: {
+ module,
+ buffers: [
+ {
+ arrayStride: 6 * 4, // position, normal
+ attributes: [
+ {
+ // position
+ shaderLocation: 0,
+ offset: 0,
+ format: 'float32x3',
+ },
+ {
+ // normal
+ shaderLocation: 1,
+ offset: 3 * 4,
+ format: 'float32x3',
+ },
+ ],
+ },
+ ],
+ },
+ fragment: {
+ module,
+ targets: [{ format: presentationFormat }],
+ },
+ primitive: {
+ cullMode: 'back',
+ },
+ depthStencil: {
+ depthWriteEnabled: true,
+ depthCompare: 'less',
+ format: depthFormat,
+ },
+});
+
+const resizeObserver = new ResizeObserver((entries) => {
+ for (const entry of entries) {
+ const canvas = entry.target as HTMLCanvasElement;
+ const width = entry.contentBoxSize[0].inlineSize;
+ const height = entry.contentBoxSize[0].blockSize;
+ canvas.width = Math.max(
+ 1,
+ Math.min(width, device.limits.maxTextureDimension2D)
+ );
+ canvas.height = Math.max(
+ 1,
+ Math.min(height, device.limits.maxTextureDimension2D)
+ );
+ }
+});
+
+const visibleCanvasSet = new Set();
+const intersectionObserver = new IntersectionObserver((entries) => {
+ for (const { target, isIntersecting } of entries) {
+ const canvas = target as HTMLCanvasElement;
+ if (isIntersecting) {
+ visibleCanvasSet.add(canvas);
+ } else {
+ visibleCanvasSet.delete(canvas);
+ }
+ }
+});
+
+type CanvasInfo = {
+ context: GPUCanvasContext;
+ depthTexture?: GPUTexture;
+ clearValue: number[];
+ worldViewProjectionMatrixValue: Float32Array;
+ worldMatrixValue: Float32Array;
+ uniformValues: Float32Array;
+ uniformBuffer: GPUBuffer;
+ bindGroup: GPUBindGroup;
+ rotation: number;
+ model: Model;
+};
+
+const outerElem = document.querySelector('#outer');
+const canvasToInfoMap = new Map();
+const numProducts = 200;
+for (let i = 0; i < numProducts; ++i) {
+ // making this
+ // <div class="product size?">
+ //   <canvas></canvas>
+ //   <div>Product#: ?</div>
+ // </div>
+ const canvas = document.createElement('canvas');
+ resizeObserver.observe(canvas);
+ intersectionObserver.observe(canvas);
+
+ const container = document.createElement('div');
+ container.className = `product size${randInt(4)}`;
+
+ const description = document.createElement('div');
+ description.textContent = `product#: ${i + 1}`;
+
+ container.appendChild(canvas);
+ container.appendChild(description);
+ outerElem.appendChild(container);
+
+ // Get a WebGPU context and configure it.
+ const context = canvas.getContext('webgpu');
+ context.configure({
+ device,
+ format: presentationFormat,
+ });
+
+ // Make a uniform buffer and type array views
+ // for our uniforms.
+ const uniformValues = new Float32Array(16 + 16 + 4);
+ const uniformBuffer = device.createBuffer({
+ size: uniformValues.byteLength,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+ });
+ const kWorldViewProjectionMatrixOffset = 0;
+ const kWorldMatrixOffset = 16;
+ const kColorOffset = 32;
+ const worldViewProjectionMatrixValue = uniformValues.subarray(
+ kWorldViewProjectionMatrixOffset,
+ kWorldViewProjectionMatrixOffset + 16);
+ const worldMatrixValue = uniformValues.subarray(
+ kWorldMatrixOffset,
+ kWorldMatrixOffset + 16
+ );
+ const colorValue = uniformValues.subarray(kColorOffset, kColorOffset + 4);
+ colorValue.set(randColor());
+
+ // Make a bind group for this uniform
+ const bindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: uniformBuffer } }],
+ });
+
+ canvasToInfoMap.set(canvas, {
+ context,
+ clearValue: randColor(),
+ worldViewProjectionMatrixValue,
+ worldMatrixValue,
+ uniformValues,
+ uniformBuffer,
+ bindGroup,
+ rotation: rand(Math.PI * 2),
+ model: models[randInt(models.length)],
+ });
+}
+
+const renderPassDescriptor: GPURenderPassDescriptor = {
+ label: 'our basic canvas renderPass',
+ colorAttachments: [
+ {
+ view: undefined, // <- to be filled out when we render
+ clearValue: [0.3, 0.3, 0.3, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: {
+ view: undefined, // <- to be filled out when we render
+ depthClearValue: 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ },
+};
+
+function render(time: number) {
+ time *= 0.001; // convert to seconds;
+
+ // make a command encoder to start encoding commands
+ const encoder = device.createCommandEncoder();
+
+ visibleCanvasSet.forEach((canvas) => {
+ const canvasInfo = canvasToInfoMap.get(canvas);
+ const {
+ context,
+ uniformBuffer,
+ uniformValues,
+ worldViewProjectionMatrixValue,
+ worldMatrixValue,
+ bindGroup,
+ clearValue,
+ rotation,
+ model: { vertexBuffer, indexBuffer, indexFormat, vertexCount },
+ } = canvasInfo;
+ let { depthTexture } = canvasInfo;
+
+ // Get the current texture from the canvas context and
+ // set it as the texture to render to.
+ const canvasTexture = context.getCurrentTexture();
+ renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
+ renderPassDescriptor.colorAttachments[0].clearValue = clearValue;
+
+ // If we don't have a depth texture OR if its size is different
+ // from the canvasTexture when make a new depth texture
+ if (
+ !depthTexture ||
+ depthTexture.width !== canvasTexture.width ||
+ depthTexture.height !== canvasTexture.height
+ ) {
+ if (depthTexture) {
+ depthTexture.destroy();
+ }
+ depthTexture = device.createTexture({
+ size: [canvasTexture.width, canvasTexture.height],
+ format: 'depth24plus',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ canvasInfo.depthTexture = depthTexture;
+ }
+ renderPassDescriptor.depthStencilAttachment.view =
+ depthTexture.createView();
+
+ const fov = (60 * Math.PI) / 180;
+ const aspect = canvas.clientWidth / canvas.clientHeight;
+ const projection = mat4.perspective(fov, aspect, 0.1, 100);
+
+ const view = mat4.lookAt(
+ [0, 30, 50], // eye
+ [0, 0, 0], // target
+ [0, 1, 0] // up
+ );
+
+ const viewProjection = mat4.multiply(projection, view);
+
+ const world = mat4.rotationY(time * 0.1 + rotation);
+ mat4.multiply(viewProjection, world, worldViewProjectionMatrixValue);
+ mat3.fromMat4(world, worldMatrixValue);
+
+ // Upload our uniform values.
+ device.queue.writeBuffer(uniformBuffer, 0, uniformValues);
+
+ // make a render pass encoder to encode render specific commands
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.setVertexBuffer(0, vertexBuffer);
+ pass.setIndexBuffer(indexBuffer, indexFormat);
+ pass.setBindGroup(0, bindGroup);
+ pass.drawIndexed(vertexCount);
+ pass.end();
+ });
+
+ const commandBuffer = encoder.finish();
+ device.queue.submit([commandBuffer]);
+
+ requestAnimationFrame(render);
+}
+requestAnimationFrame(render);
diff --git a/sample/multipleCanvases/meta.ts b/sample/multipleCanvases/meta.ts
new file mode 100644
index 00000000..34f4581f
--- /dev/null
+++ b/sample/multipleCanvases/meta.ts
@@ -0,0 +1,14 @@
+export default {
+ name: 'Multiple Canvases',
+ description: `\
+This example shows rendering to multiple canvases with a single WebGPU device and using \`IntersectionObserver\`
+to only render to visible canvases.
+
+For more info [see this article](https://webgpufundamentals.org/webgpu/lessons/webgpu-multiple-canvases.html).`,
+ filename: __DIRNAME__,
+ sources: [
+ { path: 'main.ts' },
+ { path: 'models.ts' },
+ { path: 'solidColorLit.wgsl' },
+ ],
+};
diff --git a/sample/multipleCanvases/models.ts b/sample/multipleCanvases/models.ts
new file mode 100644
index 00000000..4633e37b
--- /dev/null
+++ b/sample/multipleCanvases/models.ts
@@ -0,0 +1,114 @@
+// Ideally all the models would be the same format
+// and we'd determine that format at build time or before
+// but, we want to reuse the model data in this repo
+// so we'll normalize it here
+
+import { vec3 } from 'wgpu-matrix';
+import { mesh as teapot } from '../../meshes/teapot';
+import { mesh as dragon } from '../../meshes/stanfordDragon';
+import { createSphereMesh } from '../../meshes/sphere';
+
+type Mesh = {
+ positions: [number, number, number][];
+ triangles: [number, number, number][];
+ normals: [number, number, number][];
+};
+
+export function convertMeshToTypedArrays(
+ mesh: Mesh,
+ scale: number,
+ offset = [0, 0, 0]
+) {
+ const { positions, normals, triangles } = mesh;
+ const scaledPositions = positions.map((p) =>
+ p.map((v, i) => v * scale + offset[i % 3])
+ );
+ const vertices = new Float32Array(scaledPositions.length * 6);
+ for (let i = 0; i < scaledPositions.length; ++i) {
+ vertices.set(scaledPositions[i], 6 * i);
+ vertices.set(normals[i], 6 * i + 3);
+ }
+ const indices = new Uint32Array(triangles.length * 3);
+ for (let i = 0; i < triangles.length; ++i) {
+ indices.set(triangles[i], 3 * i);
+ }
+
+ return {
+ vertices,
+ indices,
+ };
+}
+
+function createSphereTypedArrays(
+ radius: number,
+ widthSegments = 32,
+ heightSegments = 16,
+ randomness = 0
+) {
+ const { vertices: verticesWithUVs, indices } = createSphereMesh(
+ radius,
+ widthSegments,
+ heightSegments,
+ randomness
+ );
+ const numVertices = verticesWithUVs.length / 8;
+ const vertices = new Float32Array(numVertices * 6);
+ for (let i = 0; i < numVertices; ++i) {
+ const srcNdx = i * 8;
+ const dstNdx = i * 6;
+ vertices.set(verticesWithUVs.subarray(srcNdx, srcNdx + 6), dstNdx);
+ }
+ return {
+ vertices,
+ indices: new Uint32Array(indices),
+ };
+}
+
+function flattenNormals({
+ vertices,
+ indices,
+}: {
+ vertices: Float32Array;
+ indices: Uint32Array;
+}) {
+ const newVertices = new Float32Array(indices.length * 6);
+ const newIndices = new Uint32Array(indices.length);
+ for (let i = 0; i < indices.length; i += 3) {
+ const positions = [];
+ for (let j = 0; j < 3; ++j) {
+ const ndx = indices[i + j];
+ const srcNdx = ndx * 6;
+ const dstNdx = (i + j) * 6;
+ // copy position
+ const pos = vertices.subarray(srcNdx, srcNdx + 3);
+ newVertices.set(pos, dstNdx);
+ positions.push(pos);
+ newIndices[i + j] = i + j;
+ }
+
+ const normal = vec3.normalize(
+ vec3.cross(
+ vec3.normalize(vec3.subtract(positions[1], positions[0])),
+ vec3.normalize(vec3.subtract(positions[2], positions[1]))
+ )
+ );
+
+ for (let j = 0; j < 3; ++j) {
+ const dstNdx = (i + j) * 6;
+ newVertices.set(normal, dstNdx + 3);
+ }
+ }
+
+ return {
+ vertices: newVertices,
+ indices: newIndices,
+ };
+}
+
+export const modelData = {
+ teapot: convertMeshToTypedArrays(teapot, 1.5),
+ dragon: convertMeshToTypedArrays(dragon, 0.5, [0, -25, 0]),
+ sphere: flattenNormals(createSphereTypedArrays(20)),
+ jewel: flattenNormals(createSphereTypedArrays(20, 5, 3)),
+ rock: flattenNormals(createSphereTypedArrays(20, 32, 16, 0.1)),
+};
diff --git a/sample/multipleCanvases/solidColorLit.wgsl b/sample/multipleCanvases/solidColorLit.wgsl
new file mode 100644
index 00000000..c29c3f6c
--- /dev/null
+++ b/sample/multipleCanvases/solidColorLit.wgsl
@@ -0,0 +1,30 @@
+struct Uniforms {
+ worldViewProjectionMatrix: mat4x4f,
+ worldMatrix: mat4x4f,
+ color: vec4f,
+};
+
+struct Vertex {
+ @location(0) position: vec4f,
+ @location(1) normal: vec3f,
+};
+
+struct VSOut {
+ @builtin(position) position: vec4f,
+ @location(0) normal: vec3f,
+};
+
+@group(0) @binding(0) var<uniform> uni: Uniforms;
+
+@vertex fn vs(vin: Vertex) -> VSOut {
+ var vOut: VSOut;
+ vOut.position = uni.worldViewProjectionMatrix * vin.position;
+ vOut.normal = (uni.worldMatrix * vec4f(vin.normal, 0)).xyz;
+ return vOut;
+}
+
+@fragment fn fs(vin: VSOut) -> @location(0) vec4f {
+ let lightDirection = normalize(vec3f(4, 10, 6));
+ let light = dot(normalize(vin.normal), lightDirection) * 0.5 + 0.5;
+ return vec4f(uni.color.rgb * light, uni.color.a);
+}
diff --git a/src/samples.ts b/src/samples.ts
index 3b33179e..8d49deb8 100644
--- a/src/samples.ts
+++ b/src/samples.ts
@@ -15,6 +15,7 @@ import helloTriangleMSAA from '../sample/helloTriangleMSAA/meta';
import imageBlur from '../sample/imageBlur/meta';
import instancedCube from '../sample/instancedCube/meta';
import metaballs from '../sample/metaballs/meta';
+import multipleCanvases from '../sample/multipleCanvases/meta';
import normalMap from '../sample/normalMap/meta';
import particles from '../sample/particles/meta';
import points from '../sample/points/meta';
@@ -132,6 +133,7 @@ export const pageCategories: PageCategory[] = [
samples: {
resizeCanvas,
resizeObserverHDDPI,
+ multipleCanvases,
videoUploading,
worker,
},