diff --git a/.eslintrc.cjs b/.eslintrc.cjs
new file mode 100644
index 00000000..6a64ad33
--- /dev/null
+++ b/.eslintrc.cjs
@@ -0,0 +1,14 @@
+module.exports = {
+  parser: '@typescript-eslint/parser',
+  extends: [
+    'plugin:@typescript-eslint/recommended',
+    'plugin:prettier/recommended',
+  ],
+  plugins: ['@typescript-eslint', 'eslint-plugin-html', 'prettier'],
+  rules: {
+    '@typescript-eslint/no-unused-vars': [
+      'error',
+      { vars: 'all', args: 'after-used', ignoreRestSiblings: true },
+    ],
+  },
+};
diff --git a/.eslintrc.js b/.eslintrc.js
deleted file mode 100644
index 96d162bc..00000000
--- a/.eslintrc.js
+++ /dev/null
@@ -1,27 +0,0 @@
-module.exports = {
-  parser: "@typescript-eslint/parser",
-  extends: [
-    "plugin:@typescript-eslint/recommended",
-    "plugin:react/recommended",
-    "plugin:prettier/recommended",
-    'plugin:@next/next/recommended',
-  ],
-  plugins: [
-    "@typescript-eslint",
-    "react",
-    "prettier"
-  ],
-  rules: {
-    "react/react-in-jsx-scope": "off",
-    "react/prop-types": "off",
-    "@typescript-eslint/no-unused-vars": ["error", { "vars": "all", "args": "after-used", "ignoreRestSiblings": true }]
-  },
-  globals: {
-    React: "writable"
-  },
-  settings: {
-    react: {
-      version: "detect"
-    }
-  }
-};
diff --git a/.github/workflows/build-and-deploy.yml b/.github/workflows/build-and-deploy.yml
index 60a7fa37..ca587f16 100644
--- a/.github/workflows/build-and-deploy.yml
+++ b/.github/workflows/build-and-deploy.yml
@@ -29,7 +29,6 @@ jobs:
           npm ci
           npm run-script lint
           npm run-script build
-          npm run-script export
           touch out/.nojekyll
 
       - name: Deploy 🚀
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4284c4c8..030e6f86 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -32,4 +32,3 @@ jobs:
           npm ci
           npm run-script lint
           npm run-script build
-          npm run-script export
diff --git a/.prettierrc.js b/.prettierrc.cjs
similarity index 100%
rename from .prettierrc.js
rename to .prettierrc.cjs
diff --git a/build/lib/copyAndWatch.js b/build/lib/copyAndWatch.js
new file mode 100644
index 00000000..626a0abb
--- /dev/null
+++ b/build/lib/copyAndWatch.js
@@ -0,0 +1,62 @@
+import chokidar from 'chokidar';
+import fs from 'fs';
+import path from 'path';
+
+const debug = console.log; // debug logging is ON; replace with `() => {}` to silence
+const removeLeadingSlash = (s) => s.replace(/^\//, '');
+
+/**
+ * Recursively copies files and watches for changes.
+ *
+ * Example:
+ *
+ *    copyAndWatch([
+ *      {src: "src\/**\/*.js", srcPrefix: "src", dst: "out"},   // would copy src/bar/moo.js -> out/bar/moo.js
+ *      {src: "index.html", dst: "out"},                        // copies index.html -> out/index.html
+ *    ]);
+ *
+ * @param {*} paths [{src: glob, srcPrefix: string, dst: string }]
+ * @param {*} options { watch: true/false }  // watch: false = just copy and exit.
+ */
+export function copyAndWatch(paths, { watch } = { watch: true }) {
+  for (const { src, srcPrefix, dst } of paths) {
+    const watcher = chokidar.watch(src, {
+      ignored: /(^|[\/\\])\../, // ignore dot files
+      persistent: watch,
+    });
+
+    const makeDstPath = (path, dst) =>
+      `${dst}/${removeLeadingSlash(
+        path.startsWith(srcPrefix) ? path.substring(srcPrefix.length) : path
+      )}`;
+
+    watcher
+      .on('addDir', (srcPath) => {
+        const dstPath = makeDstPath(srcPath, dst);
+        debug('addDir:', srcPath, dstPath);
+        fs.mkdirSync(dstPath, { recursive: true });
+      })
+      .on('add', (srcPath) => {
+        const dstPath = makeDstPath(srcPath, dst);
+        const dir = path.dirname(dstPath);
+        fs.mkdirSync(dir, { recursive: true });
+        debug('add:', srcPath, dstPath);
+        fs.copyFileSync(srcPath, dstPath);
+      })
+      .on('change', (srcPath) => {
+        const dstPath = makeDstPath(srcPath, dst);
+        debug('change:', srcPath, dstPath);
+        fs.copyFileSync(srcPath, dstPath);
+      })
+      .on('unlink', (srcPath) => {
+        const dstPath = makeDstPath(srcPath, dst);
+        debug('unlink:', srcPath, dstPath);
+        fs.unlinkSync(dstPath);
+      })
+      .on('ready', () => {
+        if (!watch) {
+          watcher.close();
+        }
+      });
+  }
+}
diff --git a/build/lib/readdir.js b/build/lib/readdir.js
new file mode 100644
index 00000000..4ce688fa
--- /dev/null
+++ b/build/lib/readdir.js
@@ -0,0 +1,16 @@
+import fs from 'fs';
+import path from 'path';
+
+// Not needed in Node v20+: fs.readdirSync(dir, { recursive: true }) replaces this helper.
+export function readDirSyncRecursive(dir) {
+  const basename = path.basename(dir);
+  const entries = fs.readdirSync(dir, { withFileTypes: true });
+  return entries
+    .map((entry) =>
+      entry.isDirectory()
+        ? readDirSyncRecursive(`${dir}/${entry.name}`)
+        : entry.name
+    )
+    .flat()
+    .map((name) => `${basename}/${name}`);
+}
diff --git a/build/tools/build.js b/build/tools/build.js
new file mode 100644
index 00000000..2996824e
--- /dev/null
+++ b/build/tools/build.js
@@ -0,0 +1,14 @@
+import { spawn } from 'child_process';
+import { mkdirSync } from 'fs';
+
+mkdirSync('out', { recursive: true });
+
+spawn('node', ['build/tools/copy.js'], {
+  shell: true,
+  stdio: 'inherit',
+});
+
+spawn('./node_modules/.bin/rollup', ['-c'], {
+  shell: true,
+  stdio: 'inherit',
+});
diff --git a/build/tools/copy.js b/build/tools/copy.js
new file mode 100644
index 00000000..456f7e90
--- /dev/null
+++ b/build/tools/copy.js
@@ -0,0 +1,14 @@
+import { copyAndWatch } from '../lib/copyAndWatch.js';
+
+const watch = !!process.argv[2];
+
+copyAndWatch(
+  [
+    { src: 'public/**/*', srcPrefix: 'public', dst: 'out' },
+    { src: 'meshes/**/*', dst: 'out' },
+    { src: 'sample/**/*', dst: 'out' },
+    { src: 'shaders/**/*', dst: 'out' },
+    { src: 'index.html', dst: 'out' },
+  ],
+  { watch }
+);
diff --git a/build/tools/serve.js b/build/tools/serve.js
new file mode 100644
index 00000000..fa6cb45f
--- /dev/null
+++ b/build/tools/serve.js
@@ -0,0 +1,19 @@
+import { spawn } from 'child_process';
+import { mkdirSync } from 'fs';
+
+mkdirSync('out', { recursive: true });
+
+spawn('npm', ['run', 'watch'], {
+  shell: true,
+  stdio: 'inherit',
+});
+
+spawn('node', ['build/tools/copy.js', '1'], {
+  shell: true,
+  stdio: 'inherit',
+});
+
+spawn('./node_modules/.bin/servez', ['out'], {
+  shell: true,
+  stdio: 'inherit',
+});
diff --git a/index.html b/index.html
new file mode 100644
index 00000000..bcf97654
--- /dev/null
+++ b/index.html
@@ -0,0 +1,86 @@
+
+
+  
+    WebGPU Samples 
+    
+      
+        
+        
+        
+          
+              Github
+           
+          
+          
+          
+          
Other Pages 
+          
+        
 
+
+      
+        
+          
+            The WebGPU Samples are a set of samples and demos demonstrating the use
+            of the WebGPU API . Please see the current
+            implementation status and how to run WebGPU in your browser at
+            webgpu.io .
+          
+        
+          
+         
+        
+       
+    
) -> @location(0) vec4 {
   }
 
   return color;
-}
\ No newline at end of file
+}
diff --git a/sample/a-buffer/index.html b/sample/a-buffer/index.html
new file mode 100644
index 00000000..9dd88227
--- /dev/null
+++ b/sample/a-buffer/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: a-buffer 
+    
+    
+    
+  
+  
+    
+// * maxStorableFragments: u32
+// * targetWidth: u32
+const uniformsSize = roundUp(
+  16 * Float32Array.BYTES_PER_ELEMENT + 2 * Uint32Array.BYTES_PER_ELEMENT,
+  16
+);
+
+const uniformBuffer = device.createBuffer({
+  size: uniformsSize,
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+  label: 'uniformBuffer',
+});
+
+const opaqueModule = device.createShaderModule({
+  code: opaqueWGSL,
+  label: 'opaqueModule',
+});
+
+const opaquePipeline = device.createRenderPipeline({
+  layout: 'auto',
+  vertex: {
+    module: opaqueModule,
+    entryPoint: 'main_vs',
+    buffers: [
+      {
+        arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
+        attributes: [
+          {
+            // position
+            format: 'float32x3',
+            offset: 0,
+            shaderLocation: 0,
+          },
+        ],
+      },
+    ],
+  },
+  fragment: {
+    module: opaqueModule,
+    entryPoint: 'main_fs',
+    targets: [
+      {
+        format: presentationFormat,
+      },
+    ],
+  },
+  primitive: {
+    topology: 'triangle-list',
+  },
+  depthStencil: {
+    depthWriteEnabled: true,
+    depthCompare: 'less',
+    format: 'depth24plus',
+  },
+  label: 'opaquePipeline',
+});
+
+const opaquePassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      view: undefined,
+      clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+  depthStencilAttachment: {
+    view: undefined,
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+  },
+  label: 'opaquePassDescriptor',
+};
+
+const opaqueBindGroup = device.createBindGroup({
+  layout: opaquePipeline.getBindGroupLayout(0),
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: uniformBuffer,
+        size: 16 * Float32Array.BYTES_PER_ELEMENT,
+        label: 'modelViewProjection',
+      },
+    },
+  ],
+  label: 'opaquePipeline',
+});
+
+const translucentModule = device.createShaderModule({
+  code: translucentWGSL,
+  label: 'translucentModule',
+});
+
+const translucentBindGroupLayout = device.createBindGroupLayout({
+  label: 'translucentBindGroupLayout',
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+    {
+      binding: 1,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'storage',
+      },
+    },
+    {
+      binding: 2,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'storage',
+      },
+    },
+    {
+      binding: 3,
+      visibility: GPUShaderStage.FRAGMENT,
+      texture: { sampleType: 'depth' },
+    },
+    {
+      binding: 4,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+        hasDynamicOffset: true,
+      },
+    },
+  ],
+});
+
+const translucentPipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [translucentBindGroupLayout],
+    label: 'translucentPipelineLayout',
+  }),
+  vertex: {
+    module: translucentModule,
+    entryPoint: 'main_vs',
+    buffers: [
+      {
+        arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
+        attributes: [
+          {
+            format: 'float32x3',
+            offset: 0,
+            shaderLocation: 0,
+          },
+        ],
+      },
+    ],
+  },
+  fragment: {
+    module: translucentModule,
+    entryPoint: 'main_fs',
+    targets: [
+      {
+        format: presentationFormat,
+        writeMask: 0x0,
+      },
+    ],
+  },
+  primitive: {
+    topology: 'triangle-list',
+  },
+  label: 'translucentPipeline',
+});
+
+const translucentPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      loadOp: 'load',
+      storeOp: 'store',
+      view: undefined,
+    },
+  ],
+  label: 'translucentPassDescriptor',
+};
+
+const compositeModule = device.createShaderModule({
+  code: compositeWGSL,
+  label: 'compositeModule',
+});
+
+const compositeBindGroupLayout = device.createBindGroupLayout({
+  label: 'compositeBindGroupLayout',
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+    {
+      binding: 1,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'storage',
+      },
+    },
+    {
+      binding: 2,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'storage',
+      },
+    },
+    {
+      binding: 3,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+        hasDynamicOffset: true,
+      },
+    },
+  ],
+});
+
+const compositePipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [compositeBindGroupLayout],
+    label: 'compositePipelineLayout',
+  }),
+  vertex: {
+    module: compositeModule,
+    entryPoint: 'main_vs',
+  },
+  fragment: {
+    module: compositeModule,
+    entryPoint: 'main_fs',
+    targets: [
+      {
+        format: presentationFormat,
+        blend: {
+          color: {
+            srcFactor: 'one',
+            operation: 'add',
+            dstFactor: 'one-minus-src-alpha',
+          },
+          alpha: {},
+        },
+      },
+    ],
+  },
+  primitive: {
+    topology: 'triangle-list',
+  },
+  label: 'compositePipeline',
+});
+
+const compositePassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      view: undefined,
+      loadOp: 'load',
+      storeOp: 'store',
+    },
+  ],
+  label: 'compositePassDescriptor',
+};
+
+const configure = () => {
+  let devicePixelRatio = window.devicePixelRatio;
+
+  // The default maximum storage buffer binding size is 128MiB. The amount
+  // of memory we need to store transparent fragments depends on the size
+  // of the canvas and the average number of layers per fragment we want to
+  // support. When the devicePixelRatio is 1, we know that 128MiB is enough
+  // to store 4 layers per pixel at 600x600. However, when the device pixel
+  // ratio is high enough we will exceed this limit.
+  //
+  // We provide 2 choices of mitigations to this issue:
+  // 1) Clamp the device pixel ratio to a value which we know will not break
+  //    the limit. The tradeoff here is that the canvas resolution will not
+  //    match the native resolution and therefore may have a reduction in
+  //    quality.
+  // 2) Break the frame into a series of horizontal slices using the scissor
+  //    functionality and process a single slice at a time. This limits memory
+  //    usage because we only need enough memory to process the dimensions
+  //    of the slice. The tradeoff is the performance reduction due to multiple
+  //    passes.
+  if (settings.memoryStrategy === 'clamp-pixel-ratio') {
+    devicePixelRatio = Math.min(window.devicePixelRatio, 3);
+  }
+
+  canvas.width = canvas.clientWidth * devicePixelRatio;
+  canvas.height = canvas.clientHeight * devicePixelRatio;
+
+  const depthTexture = device.createTexture({
+    size: [canvas.width, canvas.height],
+    format: 'depth24plus',
+    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+    label: 'depthTexture',
+  });
+
+  const depthTextureView = depthTexture.createView({
+    label: 'depthTextureView',
+  });
+
+  // Determines how much memory is allocated to store linked-list elements
+  const averageLayersPerFragment = 4;
+
+  // Each element stores
+  // * color : vec4
+  // * depth : f32
+  // * index of next element in the list : u32
+  const linkedListElementSize =
+    5 * Float32Array.BYTES_PER_ELEMENT + 1 * Uint32Array.BYTES_PER_ELEMENT;
+
+  // We want to keep the linked-list buffer size under the maxStorageBufferBindingSize.
+  // Split the frame into enough slices to meet that constraint.
+  const bytesPerline =
+    canvas.width * averageLayersPerFragment * linkedListElementSize;
+  const maxLinesSupported = Math.floor(
+    device.limits.maxStorageBufferBindingSize / bytesPerline
+  );
+  const numSlices = Math.ceil(canvas.height / maxLinesSupported);
+  const sliceHeight = Math.ceil(canvas.height / numSlices);
+  const linkedListBufferSize = sliceHeight * bytesPerline;
+
+  const linkedListBuffer = device.createBuffer({
+    size: linkedListBufferSize,
+    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
+    label: 'linkedListBuffer',
+  });
+
+  // To slice up the frame we need to pass the starting fragment y position of the slice.
+  // We do this using a uniform buffer with a dynamic offset.
+  const sliceInfoBuffer = device.createBuffer({
+    size: numSlices * device.limits.minUniformBufferOffsetAlignment,
+    usage: GPUBufferUsage.UNIFORM,
+    mappedAtCreation: true,
+    label: 'sliceInfoBuffer',
+  });
+  {
+    const mapping = new Int32Array(sliceInfoBuffer.getMappedRange());
+
+    // This assumes minUniformBufferOffsetAlignment is a multiple of 4
+    const stride =
+      device.limits.minUniformBufferOffsetAlignment /
+      Int32Array.BYTES_PER_ELEMENT;
+    for (let i = 0; i < numSlices; ++i) {
+      mapping[i * stride] = i * sliceHeight;
+    }
+    sliceInfoBuffer.unmap();
+  }
+
+  // `Heads` struct contains the start index of the linked-list of translucent fragments
+  // for a given pixel.
+  // * numFragments : u32
+  // * data : array
+  const headsBuffer = device.createBuffer({
+    size: (1 + canvas.width * sliceHeight) * Uint32Array.BYTES_PER_ELEMENT,
+    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
+    label: 'headsBuffer',
+  });
+
+  const headsInitBuffer = device.createBuffer({
+    size: (1 + canvas.width * sliceHeight) * Uint32Array.BYTES_PER_ELEMENT,
+    usage: GPUBufferUsage.COPY_SRC,
+    mappedAtCreation: true,
+    label: 'headsInitBuffer',
+  });
+  {
+    const buffer = new Uint32Array(headsInitBuffer.getMappedRange());
+
+    for (let i = 0; i < buffer.length; ++i) {
+      buffer[i] = 0xffffffff;
+    }
+
+    headsInitBuffer.unmap();
+  }
+
+  const translucentBindGroup = device.createBindGroup({
+    layout: translucentBindGroupLayout,
+    entries: [
+      {
+        binding: 0,
+        resource: {
+          buffer: uniformBuffer,
+          label: 'uniforms',
+        },
+      },
+      {
+        binding: 1,
+        resource: {
+          buffer: headsBuffer,
+          label: 'headsBuffer',
+        },
+      },
+      {
+        binding: 2,
+        resource: {
+          buffer: linkedListBuffer,
+          label: 'linkedListBuffer',
+        },
+      },
+      {
+        binding: 3,
+        resource: depthTextureView,
+      },
+      {
+        binding: 4,
+        resource: {
+          buffer: sliceInfoBuffer,
+          size: device.limits.minUniformBufferOffsetAlignment,
+          label: 'sliceInfoBuffer',
+        },
+      },
+    ],
+    label: 'translucentBindGroup',
+  });
+
+  const compositeBindGroup = device.createBindGroup({
+    layout: compositePipeline.getBindGroupLayout(0),
+    entries: [
+      {
+        binding: 0,
+        resource: {
+          buffer: uniformBuffer,
+          label: 'uniforms',
+        },
+      },
+      {
+        binding: 1,
+        resource: {
+          buffer: headsBuffer,
+          label: 'headsBuffer',
+        },
+      },
+      {
+        binding: 2,
+        resource: {
+          buffer: linkedListBuffer,
+          label: 'linkedListBuffer',
+        },
+      },
+      {
+        binding: 3,
+        resource: {
+          buffer: sliceInfoBuffer,
+          size: device.limits.minUniformBufferOffsetAlignment,
+          label: 'sliceInfoBuffer',
+        },
+      },
+    ],
+  });
+
+  opaquePassDescriptor.depthStencilAttachment.view = depthTextureView;
+
+  // Rotates the camera around the origin based on time.
+  function getCameraViewProjMatrix() {
+    const aspect = canvas.width / canvas.height;
+
+    const projectionMatrix = mat4.perspective(
+      (2 * Math.PI) / 5,
+      aspect,
+      1,
+      2000.0
+    );
+
+    const upVector = vec3.fromValues(0, 1, 0);
+    const origin = vec3.fromValues(0, 0, 0);
+    const eyePosition = vec3.fromValues(0, 5, -100);
+
+    const rad = Math.PI * (Date.now() / 5000);
+    const rotation = mat4.rotateY(mat4.translation(origin), rad);
+    vec3.transformMat4(eyePosition, rotation, eyePosition);
+
+    const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
+
+    const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);
+    return viewProjMatrix as Float32Array;
+  }
+
+  return function doDraw() {
+    // update the uniform buffer
+    {
+      const buffer = new ArrayBuffer(uniformBuffer.size);
+
+      new Float32Array(buffer).set(getCameraViewProjMatrix());
+      new Uint32Array(buffer, 16 * Float32Array.BYTES_PER_ELEMENT).set([
+        averageLayersPerFragment * canvas.width * sliceHeight,
+        canvas.width,
+      ]);
+
+      device.queue.writeBuffer(uniformBuffer, 0, buffer);
+    }
+
+    const commandEncoder = device.createCommandEncoder();
+    const textureView = context.getCurrentTexture().createView();
+
+    // Draw the opaque objects
+    opaquePassDescriptor.colorAttachments[0].view = textureView;
+    const opaquePassEncoder =
+      commandEncoder.beginRenderPass(opaquePassDescriptor);
+    opaquePassEncoder.setPipeline(opaquePipeline);
+    opaquePassEncoder.setBindGroup(0, opaqueBindGroup);
+    opaquePassEncoder.setVertexBuffer(0, vertexBuffer);
+    opaquePassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+    opaquePassEncoder.drawIndexed(mesh.triangles.length * 3, 8);
+    opaquePassEncoder.end();
+
+    for (let slice = 0; slice < numSlices; ++slice) {
+      // initialize the heads buffer
+      commandEncoder.copyBufferToBuffer(
+        headsInitBuffer,
+        0,
+        headsBuffer,
+        0,
+        headsInitBuffer.size
+      );
+
+      const scissorX = 0;
+      const scissorY = slice * sliceHeight;
+      const scissorWidth = canvas.width;
+      const scissorHeight =
+        Math.min((slice + 1) * sliceHeight, canvas.height) -
+        slice * sliceHeight;
+
+      // Draw the translucent objects
+      translucentPassDescriptor.colorAttachments[0].view = textureView;
+      const translucentPassEncoder = commandEncoder.beginRenderPass(
+        translucentPassDescriptor
+      );
+
+      // Set the scissor to only process a horizontal slice of the frame
+      translucentPassEncoder.setScissorRect(
+        scissorX,
+        scissorY,
+        scissorWidth,
+        scissorHeight
+      );
+
+      translucentPassEncoder.setPipeline(translucentPipeline);
+      translucentPassEncoder.setBindGroup(0, translucentBindGroup, [
+        slice * device.limits.minUniformBufferOffsetAlignment,
+      ]);
+      translucentPassEncoder.setVertexBuffer(0, vertexBuffer);
+      translucentPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+      translucentPassEncoder.drawIndexed(mesh.triangles.length * 3, 8);
+      translucentPassEncoder.end();
+
+      // Composite the opaque and translucent objects
+      compositePassDescriptor.colorAttachments[0].view = textureView;
+      const compositePassEncoder = commandEncoder.beginRenderPass(
+        compositePassDescriptor
+      );
+
+      // Set the scissor to only process a horizontal slice of the frame
+      compositePassEncoder.setScissorRect(
+        scissorX,
+        scissorY,
+        scissorWidth,
+        scissorHeight
+      );
+
+      compositePassEncoder.setPipeline(compositePipeline);
+      compositePassEncoder.setBindGroup(0, compositeBindGroup, [
+        slice * device.limits.minUniformBufferOffsetAlignment,
+      ]);
+      compositePassEncoder.draw(6);
+      compositePassEncoder.end();
+    }
+
+    device.queue.submit([commandEncoder.finish()]);
+  };
+};
+
+let doDraw = configure();
+
+const updateSettings = () => {
+  doDraw = configure();
+};
+
+const gui = new GUI();
+gui
+  .add(settings, 'memoryStrategy', ['multipass', 'clamp-pixel-ratio'])
+  .onFinishChange(updateSettings);
+
+function frame() {
+  doDraw();
+
+  requestAnimationFrame(frame);
+}
+
+requestAnimationFrame(frame);
diff --git a/sample/a-buffer/meta.ts b/sample/a-buffer/meta.ts
new file mode 100644
index 00000000..d06443ae
--- /dev/null
+++ b/sample/a-buffer/meta.ts
@@ -0,0 +1,13 @@
+export default {
+  name: 'A-Buffer',
+  description: `Demonstrates order independent transparency using a per-pixel 
+     linked-list of translucent fragments. Provides a choice for 
+     limiting memory usage (when required).`,
+  filename: 'sample/a-buffer',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'opaque.wgsl' },
+    { path: 'translucent.wgsl' },
+    { path: 'composite.wgsl' },
+  ],
+};
diff --git a/src/sample/a-buffer/opaque.wgsl b/sample/a-buffer/opaque.wgsl
similarity index 99%
rename from src/sample/a-buffer/opaque.wgsl
rename to sample/a-buffer/opaque.wgsl
index 6ec6aee5..2f9ab25b 100644
--- a/src/sample/a-buffer/opaque.wgsl
+++ b/sample/a-buffer/opaque.wgsl
@@ -41,4 +41,4 @@ fn main_fs(@location(0) @interpolate(flat) instance: u32) -> @location(0) vec4, @location(0) @interpolate(fla
     linkedList.data[fragIndex].next = lastHead;
     linkedList.data[fragIndex].color = vec4(colors[(instance + 3u) % 6u], 0.3);
   }
-}
\ No newline at end of file
+}
diff --git a/src/sample/animometer/animometer.wgsl b/sample/animometer/animometer.wgsl
similarity index 100%
rename from src/sample/animometer/animometer.wgsl
rename to sample/animometer/animometer.wgsl
diff --git a/sample/animometer/index.html b/sample/animometer/index.html
new file mode 100644
index 00000000..90deae6b
--- /dev/null
+++ b/sample/animometer/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: animometer 
+    
+    
+    
+  
+  
+    webgpu-samples: bitonicSort 
+    
+    
+    
+  
+  
+     = {
 
 export type SampleInitParams = {
   canvas: HTMLCanvasElement;
-  pageState: { active: boolean };
   gui?: GUI;
   stats?: Stats;
 };
@@ -107,11 +105,12 @@ type CallbackAsync3D = (
 ) => Promise;
 
 type SampleInitCallback3D = CallbackSync3D | CallbackAsync3D;
+export type SampleInit = (params: SampleInitParams) => void;
 
 export const SampleInitFactoryWebGPU = async (
   callback: SampleInitCallback3D
 ): Promise => {
-  const init: SampleInit = async ({ canvas, pageState, gui, stats }) => {
+  const init = async ({ canvas, gui, stats }) => {
     const adapter = await navigator.gpu.requestAdapter();
     const timestampQueryAvailable = adapter.features.has('timestamp-query');
     let device: GPUDevice;
@@ -122,7 +121,6 @@ export const SampleInitFactoryWebGPU = async (
     } else {
       device = await adapter.requestDevice();
     }
-    if (!pageState.active) return;
     const context = canvas.getContext('webgpu') as GPUCanvasContext;
     const devicePixelRatio = window.devicePixelRatio;
     canvas.width = canvas.clientWidth * devicePixelRatio;
@@ -136,7 +134,6 @@ export const SampleInitFactoryWebGPU = async (
 
     callback({
       canvas,
-      pageState,
       gui,
       device,
       context,
diff --git a/src/sample/cameras/camera.ts b/sample/cameras/camera.ts
similarity index 98%
rename from src/sample/cameras/camera.ts
rename to sample/cameras/camera.ts
index d9b7e49c..ef671479 100644
--- a/src/sample/cameras/camera.ts
+++ b/sample/cameras/camera.ts
@@ -4,12 +4,6 @@
 import { Mat4, Vec3, Vec4, mat4, vec3 } from 'wgpu-matrix';
 import Input from './input';
 
-// Information about this file, used by the sample UI
-export const cameraSourceInfo = {
-  name: __filename.substring(__dirname.length + 1),
-  contents: __SOURCE__,
-};
-
 // Common interface for camera implementations
 export default interface Camera {
   // update updates the camera using the user-input and returns the view matrix.
diff --git a/src/sample/cameras/cube.wgsl b/sample/cameras/cube.wgsl
similarity index 100%
rename from src/sample/cameras/cube.wgsl
rename to sample/cameras/cube.wgsl
diff --git a/sample/cameras/index.html b/sample/cameras/index.html
new file mode 100644
index 00000000..d8cdc53c
--- /dev/null
+++ b/sample/cameras/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: cameras 
+    
+    
+    
+  
+  
+    webgpu-samples: computeBoids 
+    
+    
+    
+  
+  
+    ) -> @location(0) vec4 {
   return color;
-}
\ No newline at end of file
+}
diff --git a/src/sample/computeBoids/updateSprites.wgsl b/sample/computeBoids/updateSprites.wgsl
similarity index 100%
rename from src/sample/computeBoids/updateSprites.wgsl
rename to sample/computeBoids/updateSprites.wgsl
diff --git a/src/sample/cornell/common.ts b/sample/cornell/common.ts
similarity index 96%
rename from src/sample/cornell/common.ts
rename to sample/cornell/common.ts
index fe936bb3..9233fd73 100644
--- a/src/sample/cornell/common.ts
+++ b/sample/cornell/common.ts
@@ -5,11 +5,6 @@ import commonWGSL from './common.wgsl';
  * Common holds the shared WGSL between the shaders, including the common uniform buffer.
  */
 export default class Common {
-  static sourceInfo = {
-    name: __filename.substring(__dirname.length + 1),
-    contents: __SOURCE__,
-  };
-
   /** The WGSL of the common shader */
   readonly wgsl = commonWGSL;
   /** The common uniform buffer bind group and layout */
diff --git a/src/sample/cornell/common.wgsl b/sample/cornell/common.wgsl
similarity index 100%
rename from src/sample/cornell/common.wgsl
rename to sample/cornell/common.wgsl
diff --git a/sample/cornell/index.html b/sample/cornell/index.html
new file mode 100644
index 00000000..b20fdef0
--- /dev/null
+++ b/sample/cornell/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: cornell 
+    
+    
+    
+  
+  
+    webgpu-samples: cubemap 
+    
+    
+    
+  
+  
+    ;
 @group(0) @binding(1) var gBufferAlbedo: texture_2d;
 @group(0) @binding(2) var gBufferDepth: texture_depth_2d;
diff --git a/src/sample/deferredRendering/fragmentGBuffersDebugView.wgsl b/sample/deferredRendering/fragmentGBuffersDebugView.wgsl
similarity index 99%
rename from src/sample/deferredRendering/fragmentGBuffersDebugView.wgsl
rename to sample/deferredRendering/fragmentGBuffersDebugView.wgsl
index db0ea960..28ef03b3 100644
--- a/src/sample/deferredRendering/fragmentGBuffersDebugView.wgsl
+++ b/sample/deferredRendering/fragmentGBuffersDebugView.wgsl
@@ -1,4 +1,3 @@
-
 @group(0) @binding(0) var gBufferNormal: texture_2d;
 @group(0) @binding(1) var gBufferAlbedo: texture_2d;
 @group(0) @binding(2) var gBufferDepth: texture_depth_2d;
diff --git a/src/sample/deferredRendering/fragmentWriteGBuffers.wgsl b/sample/deferredRendering/fragmentWriteGBuffers.wgsl
similarity index 100%
rename from src/sample/deferredRendering/fragmentWriteGBuffers.wgsl
rename to sample/deferredRendering/fragmentWriteGBuffers.wgsl
diff --git a/sample/deferredRendering/index.html b/sample/deferredRendering/index.html
new file mode 100644
index 00000000..245f2240
--- /dev/null
+++ b/sample/deferredRendering/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: deferredRendering 
+    
+    
+    
+  
+  
+     = [
+  {
+    arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,
+    attributes: [
+      {
+        // position
+        shaderLocation: 0,
+        offset: 0,
+        format: 'float32x3',
+      },
+      {
+        // normal
+        shaderLocation: 1,
+        offset: Float32Array.BYTES_PER_ELEMENT * 3,
+        format: 'float32x3',
+      },
+      {
+        // uv
+        shaderLocation: 2,
+        offset: Float32Array.BYTES_PER_ELEMENT * 6,
+        format: 'float32x2',
+      },
+    ],
+  },
+];
+
+const primitive: GPUPrimitiveState = {
+  topology: 'triangle-list',
+  cullMode: 'back',
+};
+
+const writeGBuffersPipeline = device.createRenderPipeline({
+  layout: 'auto',
+  vertex: {
+    module: device.createShaderModule({
+      code: vertexWriteGBuffers,
+    }),
+    entryPoint: 'main',
+    buffers: vertexBuffers,
+  },
+  fragment: {
+    module: device.createShaderModule({
+      code: fragmentWriteGBuffers,
+    }),
+    entryPoint: 'main',
+    targets: [
+      // normal
+      { format: 'rgba16float' },
+      // albedo
+      { format: 'bgra8unorm' },
+    ],
+  },
+  depthStencil: {
+    depthWriteEnabled: true,
+    depthCompare: 'less',
+    format: 'depth24plus',
+  },
+  primitive,
+});
+
+const gBufferTexturesBindGroupLayout = device.createBindGroupLayout({
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.FRAGMENT,
+      texture: {
+        sampleType: 'unfilterable-float',
+      },
+    },
+    {
+      binding: 1,
+      visibility: GPUShaderStage.FRAGMENT,
+      texture: {
+        sampleType: 'unfilterable-float',
+      },
+    },
+    {
+      binding: 2,
+      visibility: GPUShaderStage.FRAGMENT,
+      texture: {
+        sampleType: 'depth',
+      },
+    },
+  ],
+});
+
+const lightsBufferBindGroupLayout = device.createBindGroupLayout({
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
+      buffer: {
+        type: 'read-only-storage',
+      },
+    },
+    {
+      binding: 1,
+      visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+    {
+      binding: 2,
+      visibility: GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+  ],
+});
+
+const gBuffersDebugViewPipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [gBufferTexturesBindGroupLayout],
+  }),
+  vertex: {
+    module: device.createShaderModule({
+      code: vertexTextureQuad,
+    }),
+    entryPoint: 'main',
+  },
+  fragment: {
+    module: device.createShaderModule({
+      code: fragmentGBuffersDebugView,
+    }),
+    entryPoint: 'main',
+    targets: [
+      {
+        format: presentationFormat,
+      },
+    ],
+    constants: {
+      canvasSizeWidth: canvas.width,
+      canvasSizeHeight: canvas.height,
+    },
+  },
+  primitive,
+});
+
+const deferredRenderPipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [
+      gBufferTexturesBindGroupLayout,
+      lightsBufferBindGroupLayout,
+    ],
+  }),
+  vertex: {
+    module: device.createShaderModule({
+      code: vertexTextureQuad,
+    }),
+    entryPoint: 'main',
+  },
+  fragment: {
+    module: device.createShaderModule({
+      code: fragmentDeferredRendering,
+    }),
+    entryPoint: 'main',
+    targets: [
+      {
+        format: presentationFormat,
+      },
+    ],
+  },
+  primitive,
+});
+
+const writeGBufferPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      view: gBufferTextureViews[0],
+
+      clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+    {
+      view: gBufferTextureViews[1],
+
+      clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+  depthStencilAttachment: {
+    view: depthTexture.createView(),
+
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+  },
+};
+
+const textureQuadPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      // view is acquired and set in render loop.
+      view: undefined,
+
+      clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+};
+
+const settings = {
+  mode: 'rendering',
+  numLights: 128,
+};
+const configUniformBuffer = (() => {
+  const buffer = device.createBuffer({
+    size: Uint32Array.BYTES_PER_ELEMENT,
+    mappedAtCreation: true,
+    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+  });
+  new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;
+  buffer.unmap();
+  return buffer;
+})();
+
+const gui = new GUI();
+gui.add(settings, 'mode', ['rendering', 'gBuffers view']);
+gui
+  .add(settings, 'numLights', 1, kMaxNumLights)
+  .step(1)
+  .onChange(() => {
+    device.queue.writeBuffer(
+      configUniformBuffer,
+      0,
+      new Uint32Array([settings.numLights])
+    );
+  });
+
+const modelUniformBuffer = device.createBuffer({
+  size: 4 * 16 * 2, // two 4x4 matrix
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const cameraUniformBuffer = device.createBuffer({
+  size: 4 * 16 * 2, // two 4x4 matrix
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const sceneUniformBindGroup = device.createBindGroup({
+  layout: writeGBuffersPipeline.getBindGroupLayout(0),
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: modelUniformBuffer,
+      },
+    },
+    {
+      binding: 1,
+      resource: {
+        buffer: cameraUniformBuffer,
+      },
+    },
+  ],
+});
+
+const gBufferTexturesBindGroup = device.createBindGroup({
+  layout: gBufferTexturesBindGroupLayout,
+  entries: [
+    {
+      binding: 0,
+      resource: gBufferTextureViews[0],
+    },
+    {
+      binding: 1,
+      resource: gBufferTextureViews[1],
+    },
+    {
+      binding: 2,
+      resource: gBufferTextureViews[2],
+    },
+  ],
+});
+
+// Lights data are uploaded in a storage buffer
+// which could be updated/culled/etc. with a compute shader
+const extent = vec3.sub(lightExtentMax, lightExtentMin);
+const lightDataStride = 8;
+const bufferSizeInByte =
+  Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;
+const lightsBuffer = device.createBuffer({
+  size: bufferSizeInByte,
+  usage: GPUBufferUsage.STORAGE,
+  mappedAtCreation: true,
+});
+
+// We randomaly populate lights randomly in a box range
+// And simply move them along y-axis per frame to show they are
+// dynamic lightings
+const lightData = new Float32Array(lightsBuffer.getMappedRange());
+const tmpVec4 = vec4.create();
+let offset = 0;
+for (let i = 0; i < kMaxNumLights; i++) {
+  offset = lightDataStride * i;
+  // position
+  for (let i = 0; i < 3; i++) {
+    tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];
+  }
+  tmpVec4[3] = 1;
+  lightData.set(tmpVec4, offset);
+  // color
+  tmpVec4[0] = Math.random() * 2;
+  tmpVec4[1] = Math.random() * 2;
+  tmpVec4[2] = Math.random() * 2;
+  // radius
+  tmpVec4[3] = 20.0;
+  lightData.set(tmpVec4, offset + 4);
+}
+lightsBuffer.unmap();
+
+const lightExtentBuffer = device.createBuffer({
+  size: 4 * 8,
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+const lightExtentData = new Float32Array(8);
+lightExtentData.set(lightExtentMin, 0);
+lightExtentData.set(lightExtentMax, 4);
+device.queue.writeBuffer(
+  lightExtentBuffer,
+  0,
+  lightExtentData.buffer,
+  lightExtentData.byteOffset,
+  lightExtentData.byteLength
+);
+
+const lightUpdateComputePipeline = device.createComputePipeline({
+  layout: 'auto',
+  compute: {
+    module: device.createShaderModule({
+      code: lightUpdate,
+    }),
+    entryPoint: 'main',
+  },
+});
+const lightsBufferBindGroup = device.createBindGroup({
+  layout: lightsBufferBindGroupLayout,
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: lightsBuffer,
+      },
+    },
+    {
+      binding: 1,
+      resource: {
+        buffer: configUniformBuffer,
+      },
+    },
+    {
+      binding: 2,
+      resource: {
+        buffer: cameraUniformBuffer,
+      },
+    },
+  ],
+});
+const lightsBufferComputeBindGroup = device.createBindGroup({
+  layout: lightUpdateComputePipeline.getBindGroupLayout(0),
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: lightsBuffer,
+      },
+    },
+    {
+      binding: 1,
+      resource: {
+        buffer: configUniformBuffer,
+      },
+    },
+    {
+      binding: 2,
+      resource: {
+        buffer: lightExtentBuffer,
+      },
+    },
+  ],
+});
+//--------------------
+
+// Scene matrices
+const eyePosition = vec3.fromValues(0, 50, -100);
+const upVector = vec3.fromValues(0, 1, 0);
+const origin = vec3.fromValues(0, 0, 0);
+
+const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 2000.0);
+
+// Move the model so it's centered.
+const modelMatrix = mat4.translation([0, -45, 0]);
+
+const modelData = modelMatrix as Float32Array;
+device.queue.writeBuffer(
+  modelUniformBuffer,
+  0,
+  modelData.buffer,
+  modelData.byteOffset,
+  modelData.byteLength
+);
+const invertTransposeModelMatrix = mat4.invert(modelMatrix);
+mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);
+const normalModelData = invertTransposeModelMatrix as Float32Array;
+device.queue.writeBuffer(
+  modelUniformBuffer,
+  64,
+  normalModelData.buffer,
+  normalModelData.byteOffset,
+  normalModelData.byteLength
+);
+
+// Rotates the camera around the origin based on time.
+function getCameraViewProjMatrix() {
+  const rad = Math.PI * (Date.now() / 5000);
+  const rotation = mat4.rotateY(mat4.translation(origin), rad);
+  const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);
+
+  const viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);
+
+  return mat4.multiply(projectionMatrix, viewMatrix) as Float32Array;
+}
+
+function frame() {
+  const cameraViewProj = getCameraViewProjMatrix();
+  device.queue.writeBuffer(
+    cameraUniformBuffer,
+    0,
+    cameraViewProj.buffer,
+    cameraViewProj.byteOffset,
+    cameraViewProj.byteLength
+  );
+  const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;
+  device.queue.writeBuffer(
+    cameraUniformBuffer,
+    64,
+    cameraInvViewProj.buffer,
+    cameraInvViewProj.byteOffset,
+    cameraInvViewProj.byteLength
+  );
+
+  const commandEncoder = device.createCommandEncoder();
+  {
+    // Write position, normal, albedo etc. data to gBuffers
+    const gBufferPass = commandEncoder.beginRenderPass(
+      writeGBufferPassDescriptor
+    );
+    gBufferPass.setPipeline(writeGBuffersPipeline);
+    gBufferPass.setBindGroup(0, sceneUniformBindGroup);
+    gBufferPass.setVertexBuffer(0, vertexBuffer);
+    gBufferPass.setIndexBuffer(indexBuffer, 'uint16');
+    gBufferPass.drawIndexed(indexCount);
+    gBufferPass.end();
+  }
+  {
+    // Update lights position
+    const lightPass = commandEncoder.beginComputePass();
+    lightPass.setPipeline(lightUpdateComputePipeline);
+    lightPass.setBindGroup(0, lightsBufferComputeBindGroup);
+    lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));
+    lightPass.end();
+  }
+  {
+    if (settings.mode === 'gBuffers view') {
+      // GBuffers debug view
+      // Left: depth
+      // Middle: normal
+      // Right: albedo (use uv to mimic a checkerboard texture)
+      textureQuadPassDescriptor.colorAttachments[0].view = context
+        .getCurrentTexture()
+        .createView();
+      const debugViewPass = commandEncoder.beginRenderPass(
+        textureQuadPassDescriptor
+      );
+      debugViewPass.setPipeline(gBuffersDebugViewPipeline);
+      debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);
+      debugViewPass.draw(6);
+      debugViewPass.end();
+    } else {
+      // Deferred rendering
+      textureQuadPassDescriptor.colorAttachments[0].view = context
+        .getCurrentTexture()
+        .createView();
+      const deferredRenderingPass = commandEncoder.beginRenderPass(
+        textureQuadPassDescriptor
+      );
+      deferredRenderingPass.setPipeline(deferredRenderPipeline);
+      deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);
+      deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);
+      deferredRenderingPass.draw(6);
+      deferredRenderingPass.end();
+    }
+  }
+  device.queue.submit([commandEncoder.finish()]);
+
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/deferredRendering/meta.ts b/sample/deferredRendering/meta.ts
new file mode 100644
index 00000000..47c85ed4
--- /dev/null
+++ b/sample/deferredRendering/meta.ts
@@ -0,0 +1,22 @@
+export default {
+  name: 'Deferred Rendering',
+  description: `This example shows how to do deferred rendering with webgpu.
+    Render geometry info to multiple targets in the gBuffers in the first pass.
+    In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.
+    And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.
+    World-space positions are reconstructed from the depth texture and camera matrix.
+    We also update light position in a compute shader, where further operations like tile/cluster culling could happen.
+    The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer
+    in the middle, and the albedo G-buffer on the right side of the screen.
+    `,
+  filename: 'sample/deferredRendering',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'vertexWriteGBuffers.wgsl' },
+    { path: 'fragmentWriteGBuffers.wgsl' },
+    { path: 'vertexTextureQuad.wgsl' },
+    { path: 'fragmentGBuffersDebugView.wgsl' },
+    { path: 'fragmentDeferredRendering.wgsl' },
+    { path: 'lightUpdate.wgsl' },
+  ],
+};
diff --git a/src/sample/deferredRendering/vertexTextureQuad.wgsl b/sample/deferredRendering/vertexTextureQuad.wgsl
similarity index 100%
rename from src/sample/deferredRendering/vertexTextureQuad.wgsl
rename to sample/deferredRendering/vertexTextureQuad.wgsl
diff --git a/src/sample/deferredRendering/vertexWriteGBuffers.wgsl b/sample/deferredRendering/vertexWriteGBuffers.wgsl
similarity index 100%
rename from src/sample/deferredRendering/vertexWriteGBuffers.wgsl
rename to sample/deferredRendering/vertexWriteGBuffers.wgsl
diff --git a/sample/fractalCube/index.html b/sample/fractalCube/index.html
new file mode 100644
index 00000000..b9b7ae55
--- /dev/null
+++ b/sample/fractalCube/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: fractalCube 
+    
+    
+    
+  
+  
+    webgpu-samples: gameOfLife 
+    
+    
+    
+  
+  
+    webgpu-samples: helloTriangle 
+    
+    
+    
+  
+  
+    webgpu-samples: helloTriangleMSAA 
+    
+    
+    
+  
+  
+    webgpu-samples: imageBlur 
+    
+    
+    
+  
+  
+    webgpu-samples: instancedCube 
+    
+    
+    
+  
+  
+    (numInstances);
+const mvpMatricesData = new Float32Array(matrixFloatCount * numInstances);
+
+const step = 4.0;
+
+// Initialize the matrix data for every instance.
+let m = 0;
+for (let x = 0; x < xCount; x++) {
+  for (let y = 0; y < yCount; y++) {
+    modelMatrices[m] = mat4.translation(
+      vec3.fromValues(
+        step * (x - xCount / 2 + 0.5),
+        step * (y - yCount / 2 + 0.5),
+        0
+      )
+    );
+    m++;
+  }
+}
+
+const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -12));
+
+const tmpMat4 = mat4.create();
+
+// Update the transformation matrix data for each instance.
+function updateTransformationMatrix() {
+  const now = Date.now() / 1000;
+
+  let m = 0,
+    i = 0;
+  for (let x = 0; x < xCount; x++) {
+    for (let y = 0; y < yCount; y++) {
+      mat4.rotate(
+        modelMatrices[i],
+        vec3.fromValues(
+          Math.sin((x + 0.5) * now),
+          Math.cos((y + 0.5) * now),
+          0
+        ),
+        1,
+        tmpMat4
+      );
+
+      mat4.multiply(viewMatrix, tmpMat4, tmpMat4);
+      mat4.multiply(projectionMatrix, tmpMat4, tmpMat4);
+
+      mvpMatricesData.set(tmpMat4, m);
+
+      i++;
+      m += matrixFloatCount;
+    }
+  }
+}
+
+const renderPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      view: undefined, // Assigned later
+
+      clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+  depthStencilAttachment: {
+    view: depthTexture.createView(),
+
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+  },
+};
+
+function frame() {
+  // Update the matrix data.
+  updateTransformationMatrix();
+  device.queue.writeBuffer(
+    uniformBuffer,
+    0,
+    mvpMatricesData.buffer,
+    mvpMatricesData.byteOffset,
+    mvpMatricesData.byteLength
+  );
+
+  renderPassDescriptor.colorAttachments[0].view = context
+    .getCurrentTexture()
+    .createView();
+
+  const commandEncoder = device.createCommandEncoder();
+  const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+  passEncoder.setPipeline(pipeline);
+  passEncoder.setBindGroup(0, uniformBindGroup);
+  passEncoder.setVertexBuffer(0, verticesBuffer);
+  passEncoder.draw(cubeVertexCount, numInstances, 0, 0);
+  passEncoder.end();
+  device.queue.submit([commandEncoder.finish()]);
+
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/instancedCube/meta.ts b/sample/instancedCube/meta.ts
new file mode 100644
index 00000000..4c7a130d
--- /dev/null
+++ b/sample/instancedCube/meta.ts
@@ -0,0 +1,11 @@
+export default {
+  name: 'Instanced Cube',
+  description: 'This example shows the use of instancing.',
+  filename: 'sample/instancedCube',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'instanced.vert.wgsl' },
+    { path: '../../shaders/vertexPositionColor.frag.wgsl' },
+    { path: '../../meshes/cube.ts' },
+  ],
+};
diff --git a/sample/normalMap/index.html b/sample/normalMap/index.html
new file mode 100644
index 00000000..ee008e6a
--- /dev/null
+++ b/sample/normalMap/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: normalMap 
+    
+    
+    
+  
+  
+    webgpu-samples: particles 
+    
+    
+    
+  
+  
+    
+        stepMode: 'vertex',
+        attributes: [
+          {
+            // vertex positions
+            shaderLocation: 2,
+            offset: 0,
+            format: 'float32x2',
+          },
+        ],
+      },
+    ],
+  },
+  fragment: {
+    module: device.createShaderModule({
+      code: particleWGSL,
+    }),
+    entryPoint: 'fs_main',
+    targets: [
+      {
+        format: presentationFormat,
+        blend: {
+          color: {
+            srcFactor: 'src-alpha',
+            dstFactor: 'one',
+            operation: 'add',
+          },
+          alpha: {
+            srcFactor: 'zero',
+            dstFactor: 'one',
+            operation: 'add',
+          },
+        },
+      },
+    ],
+  },
+  primitive: {
+    topology: 'triangle-list',
+  },
+
+  depthStencil: {
+    depthWriteEnabled: false,
+    depthCompare: 'less',
+    format: 'depth24plus',
+  },
+});
+
+const depthTexture = device.createTexture({
+  size: [canvas.width, canvas.height],
+  format: 'depth24plus',
+  usage: GPUTextureUsage.RENDER_ATTACHMENT,
+});
+
+const uniformBufferSize =
+  4 * 4 * 4 + // modelViewProjectionMatrix : mat4x4
+  3 * 4 + // right : vec3
+  4 + // padding
+  3 * 4 + // up : vec3
+  4 + // padding
+  0;
+const uniformBuffer = device.createBuffer({
+  size: uniformBufferSize,
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const uniformBindGroup = device.createBindGroup({
+  layout: renderPipeline.getBindGroupLayout(0),
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: uniformBuffer,
+      },
+    },
+  ],
+});
+
+const renderPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      view: undefined, // Assigned later
+      clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+  depthStencilAttachment: {
+    view: depthTexture.createView(),
+
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+  },
+};
+
+//////////////////////////////////////////////////////////////////////////////
+// Quad vertex buffer
+//////////////////////////////////////////////////////////////////////////////
+const quadVertexBuffer = device.createBuffer({
+  size: 6 * 2 * 4, // 6x vec2
+  usage: GPUBufferUsage.VERTEX,
+  mappedAtCreation: true,
+});
+// prettier-ignore
+const vertexData = [
+  -1.0, -1.0, +1.0, -1.0, -1.0, +1.0, -1.0, +1.0, +1.0, -1.0, +1.0, +1.0,
+];
+new Float32Array(quadVertexBuffer.getMappedRange()).set(vertexData);
+quadVertexBuffer.unmap();
+
+//////////////////////////////////////////////////////////////////////////////
+// Texture
+//////////////////////////////////////////////////////////////////////////////
+let texture: GPUTexture;
+let textureWidth = 1;
+let textureHeight = 1;
+let numMipLevels = 1;
+{
+  const response = await fetch('../../assets/img/webgpu.png');
+  const imageBitmap = await createImageBitmap(await response.blob());
+
+  // Calculate number of mip levels required to generate the probability map
+  while (
+    textureWidth < imageBitmap.width ||
+    textureHeight < imageBitmap.height
+  ) {
+    textureWidth *= 2;
+    textureHeight *= 2;
+    numMipLevels++;
+  }
+  texture = device.createTexture({
+    size: [imageBitmap.width, imageBitmap.height, 1],
+    mipLevelCount: numMipLevels,
+    format: 'rgba8unorm',
+    usage:
+      GPUTextureUsage.TEXTURE_BINDING |
+      GPUTextureUsage.STORAGE_BINDING |
+      GPUTextureUsage.COPY_DST |
+      GPUTextureUsage.RENDER_ATTACHMENT,
+  });
+  device.queue.copyExternalImageToTexture(
+    { source: imageBitmap },
+    { texture: texture },
+    [imageBitmap.width, imageBitmap.height]
+  );
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Probability map generation
+// The 0'th mip level of texture holds the color data and spawn-probability in
+// the alpha channel. The mip levels 1..N are generated to hold spawn
+// probabilities up to the top 1x1 mip level.
+//////////////////////////////////////////////////////////////////////////////
+{
+  const probabilityMapImportLevelPipeline = device.createComputePipeline({
+    layout: 'auto',
+    compute: {
+      module: device.createShaderModule({ code: probabilityMapWGSL }),
+      entryPoint: 'import_level',
+    },
+  });
+  const probabilityMapExportLevelPipeline = device.createComputePipeline({
+    layout: 'auto',
+    compute: {
+      module: device.createShaderModule({ code: probabilityMapWGSL }),
+      entryPoint: 'export_level',
+    },
+  });
+
+  const probabilityMapUBOBufferSize =
+    1 * 4 + // stride
+    3 * 4 + // padding
+    0;
+  const probabilityMapUBOBuffer = device.createBuffer({
+    size: probabilityMapUBOBufferSize,
+    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+  });
+  const buffer_a = device.createBuffer({
+    size: textureWidth * textureHeight * 4,
+    usage: GPUBufferUsage.STORAGE,
+  });
+  const buffer_b = device.createBuffer({
+    size: textureWidth * textureHeight * 4,
+    usage: GPUBufferUsage.STORAGE,
+  });
+  device.queue.writeBuffer(
+    probabilityMapUBOBuffer,
+    0,
+    new Int32Array([textureWidth])
+  );
+  const commandEncoder = device.createCommandEncoder();
+  for (let level = 0; level < numMipLevels; level++) {
+    const levelWidth = textureWidth >> level;
+    const levelHeight = textureHeight >> level;
+    const pipeline =
+      level == 0
+        ? probabilityMapImportLevelPipeline.getBindGroupLayout(0)
+        : probabilityMapExportLevelPipeline.getBindGroupLayout(0);
+    const probabilityMapBindGroup = device.createBindGroup({
+      layout: pipeline,
+      entries: [
+        {
+          // ubo
+          binding: 0,
+          resource: { buffer: probabilityMapUBOBuffer },
+        },
+        {
+          // buf_in
+          binding: 1,
+          resource: { buffer: level & 1 ? buffer_a : buffer_b },
+        },
+        {
+          // buf_out
+          binding: 2,
+          resource: { buffer: level & 1 ? buffer_b : buffer_a },
+        },
+        {
+          // tex_in / tex_out
+          binding: 3,
+          resource: texture.createView({
+            format: 'rgba8unorm',
+            dimension: '2d',
+            baseMipLevel: level,
+            mipLevelCount: 1,
+          }),
+        },
+      ],
+    });
+    if (level == 0) {
+      const passEncoder = commandEncoder.beginComputePass();
+      passEncoder.setPipeline(probabilityMapImportLevelPipeline);
+      passEncoder.setBindGroup(0, probabilityMapBindGroup);
+      passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);
+      passEncoder.end();
+    } else {
+      const passEncoder = commandEncoder.beginComputePass();
+      passEncoder.setPipeline(probabilityMapExportLevelPipeline);
+      passEncoder.setBindGroup(0, probabilityMapBindGroup);
+      passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);
+      passEncoder.end();
+    }
+  }
+  device.queue.submit([commandEncoder.finish()]);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Simulation compute pipeline
+//////////////////////////////////////////////////////////////////////////////
+const simulationParams = {
+  simulate: true,
+  deltaTime: 0.04,
+};
+
+const simulationUBOBufferSize =
+  1 * 4 + // deltaTime
+  3 * 4 + // padding
+  4 * 4 + // seed
+  0;
+const simulationUBOBuffer = device.createBuffer({
+  size: simulationUBOBufferSize,
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const gui = new GUI();
+gui.add(simulationParams, 'simulate');
+gui.add(simulationParams, 'deltaTime');
+
+const computePipeline = device.createComputePipeline({
+  layout: 'auto',
+  compute: {
+    module: device.createShaderModule({
+      code: particleWGSL,
+    }),
+    entryPoint: 'simulate',
+  },
+});
+const computeBindGroup = device.createBindGroup({
+  layout: computePipeline.getBindGroupLayout(0),
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: simulationUBOBuffer,
+      },
+    },
+    {
+      binding: 1,
+      resource: {
+        buffer: particlesBuffer,
+        offset: 0,
+        size: numParticles * particleInstanceByteSize,
+      },
+    },
+    {
+      binding: 2,
+      resource: texture.createView(),
+    },
+  ],
+});
+
+const aspect = canvas.width / canvas.height;
+const projection = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 100.0);
+const view = mat4.create();
+const mvp = mat4.create();
+
+function frame() {
+  device.queue.writeBuffer(
+    simulationUBOBuffer,
+    0,
+    new Float32Array([
+      simulationParams.simulate ? simulationParams.deltaTime : 0.0,
+      0.0,
+      0.0,
+      0.0, // padding
+      Math.random() * 100,
+      Math.random() * 100, // seed.xy
+      1 + Math.random(),
+      1 + Math.random(), // seed.zw
+    ])
+  );
+
+  mat4.identity(view);
+  mat4.translate(view, vec3.fromValues(0, 0, -3), view);
+  mat4.rotateX(view, Math.PI * -0.2, view);
+  mat4.multiply(projection, view, mvp);
+
+  // prettier-ignore
+  device.queue.writeBuffer(
+    uniformBuffer,
+    0,
+    new Float32Array([
+      // modelViewProjectionMatrix
+      mvp[0], mvp[1], mvp[2], mvp[3],
+      mvp[4], mvp[5], mvp[6], mvp[7],
+      mvp[8], mvp[9], mvp[10], mvp[11],
+      mvp[12], mvp[13], mvp[14], mvp[15],
+
+      view[0], view[4], view[8], // right
+
+      0, // padding
+
+      view[1], view[5], view[9], // up
+
+      0, // padding
+    ])
+  );
+  const swapChainTexture = context.getCurrentTexture();
+  // prettier-ignore
+  renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();
+
+  const commandEncoder = device.createCommandEncoder();
+  {
+    const passEncoder = commandEncoder.beginComputePass();
+    passEncoder.setPipeline(computePipeline);
+    passEncoder.setBindGroup(0, computeBindGroup);
+    passEncoder.dispatchWorkgroups(Math.ceil(numParticles / 64));
+    passEncoder.end();
+  }
+  {
+    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+    passEncoder.setPipeline(renderPipeline);
+    passEncoder.setBindGroup(0, uniformBindGroup);
+    passEncoder.setVertexBuffer(0, particlesBuffer);
+    passEncoder.setVertexBuffer(1, quadVertexBuffer);
+    passEncoder.draw(6, numParticles, 0, 0);
+    passEncoder.end();
+  }
+
+  device.queue.submit([commandEncoder.finish()]);
+
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/particles/meta.ts b/sample/particles/meta.ts
new file mode 100644
index 00000000..70e20722
--- /dev/null
+++ b/sample/particles/meta.ts
@@ -0,0 +1,11 @@
+export default {
+  name: 'Particles',
+  description:
+    'This example demonstrates rendering of particles simulated with compute shaders.',
+  filename: 'sample/particles',
+  sources: [
+    { path: 'main.ts' },
+    { path: './particle.wgsl' },
+    { path: './probabilityMap.wgsl' },
+  ],
+};
diff --git a/src/sample/particles/particle.wgsl b/sample/particles/particle.wgsl
similarity index 100%
rename from src/sample/particles/particle.wgsl
rename to sample/particles/particle.wgsl
diff --git a/src/sample/particles/probabilityMap.wgsl b/sample/particles/probabilityMap.wgsl
similarity index 100%
rename from src/sample/particles/probabilityMap.wgsl
rename to sample/particles/probabilityMap.wgsl
diff --git a/sample/renderBundles/index.html b/sample/renderBundles/index.html
new file mode 100644
index 00000000..6b0c4498
--- /dev/null
+++ b/sample/renderBundles/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: renderBundles 
+    
+    
+    
+  
+  
+    webgpu-samples: resizeCanvas 
+    
+    webgpu-samples: samplerParameters 
+    
+    
+    
+  
+  
+    (numInstances);
+const mvpMatricesData = new Float32Array(matrixFloatCount * numInstances);
+
+let m = 0;
+for (let x = 0; x < xCount; x++) {
+  for (let y = 0; y < yCount; y++) {
+    const z = -800 * m;
+    const s = 1 + 50 * m;
+
+    modelMatrices[m] = mat4.translation(
+      vec3.fromValues(
+        x - xCount / 2 + 0.5,
+        (4.0 - 0.2 * z) * (y - yCount / 2 + 1.0),
+        z
+      )
+    );
+    mat4.scale(modelMatrices[m], vec3.fromValues(s, s, s), modelMatrices[m]);
+
+    m++;
+  }
+}
+
+const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -12));
+
+const aspect = (0.5 * canvas.width) / canvas.height;
+// wgpu-matrix perspective doesn't currently handle zFar === Infinity.
+// https://github.com/greggman/wgpu-matrix/issues/9
+const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 5, 9999);
+
+const viewProjectionMatrix = mat4.multiply(projectionMatrix, viewMatrix);
+// To use 1/z depth we simply multiply depthRangeRemapMatrix into our default camera view-projection matrix.
+const reversedRangeViewProjectionMatrix = mat4.multiply(
+  depthRangeRemapMatrix,
+  viewProjectionMatrix
+);
+
+let bufferData = viewProjectionMatrix as Float32Array;
+device.queue.writeBuffer(
+  cameraMatrixBuffer,
+  0,
+  bufferData.buffer,
+  bufferData.byteOffset,
+  bufferData.byteLength
+);
+bufferData = reversedRangeViewProjectionMatrix as Float32Array;
+device.queue.writeBuffer(
+  cameraMatrixReversedDepthBuffer,
+  0,
+  bufferData.buffer,
+  bufferData.byteOffset,
+  bufferData.byteLength
+);
+
+const tmpMat4 = mat4.create();
+function updateTransformationMatrix() {
+  const now = Date.now() / 1000;
+
+  for (let i = 0, m = 0; i < numInstances; i++, m += matrixFloatCount) {
+    mat4.rotate(
+      modelMatrices[i],
+      vec3.fromValues(Math.sin(now), Math.cos(now), 0),
+      (Math.PI / 180) * 30,
+      tmpMat4
+    );
+    mvpMatricesData.set(tmpMat4, m);
+  }
+}
+
+const settings = {
+  mode: 'color',
+};
+const gui = new GUI();
+gui.add(settings, 'mode', ['color', 'precision-error', 'depth-texture']);
+
+function frame() {
+  updateTransformationMatrix();
+  device.queue.writeBuffer(
+    uniformBuffer,
+    0,
+    mvpMatricesData.buffer,
+    mvpMatricesData.byteOffset,
+    mvpMatricesData.byteLength
+  );
+
+  const attachment = context.getCurrentTexture().createView();
+  const commandEncoder = device.createCommandEncoder();
+  if (settings.mode === 'color') {
+    for (const m of depthBufferModes) {
+      drawPassDescriptors[m].colorAttachments[0].view = attachment;
+      drawPassDescriptors[m].depthStencilAttachment.depthClearValue =
+        depthClearValues[m];
+      const colorPass = commandEncoder.beginRenderPass(drawPassDescriptors[m]);
+      colorPass.setPipeline(colorPassPipelines[m]);
+      colorPass.setBindGroup(0, uniformBindGroups[m]);
+      colorPass.setVertexBuffer(0, verticesBuffer);
+      colorPass.setViewport(
+        (canvas.width * m) / 2,
+        0,
+        canvas.width / 2,
+        canvas.height,
+        0,
+        1
+      );
+      colorPass.draw(geometryDrawCount, numInstances, 0, 0);
+      colorPass.end();
+    }
+  } else if (settings.mode === 'precision-error') {
+    for (const m of depthBufferModes) {
+      {
+        depthPrePassDescriptor.depthStencilAttachment.depthClearValue =
+          depthClearValues[m];
+        const depthPrePass = commandEncoder.beginRenderPass(
+          depthPrePassDescriptor
+        );
+        depthPrePass.setPipeline(depthPrePassPipelines[m]);
+        depthPrePass.setBindGroup(0, uniformBindGroups[m]);
+        depthPrePass.setVertexBuffer(0, verticesBuffer);
+        depthPrePass.setViewport(
+          (canvas.width * m) / 2,
+          0,
+          canvas.width / 2,
+          canvas.height,
+          0,
+          1
+        );
+        depthPrePass.draw(geometryDrawCount, numInstances, 0, 0);
+        depthPrePass.end();
+      }
+      {
+        drawPassDescriptors[m].colorAttachments[0].view = attachment;
+        drawPassDescriptors[m].depthStencilAttachment.depthClearValue =
+          depthClearValues[m];
+        const precisionErrorPass = commandEncoder.beginRenderPass(
+          drawPassDescriptors[m]
+        );
+        precisionErrorPass.setPipeline(precisionPassPipelines[m]);
+        precisionErrorPass.setBindGroup(0, uniformBindGroups[m]);
+        precisionErrorPass.setBindGroup(1, depthTextureBindGroup);
+        precisionErrorPass.setVertexBuffer(0, verticesBuffer);
+        precisionErrorPass.setViewport(
+          (canvas.width * m) / 2,
+          0,
+          canvas.width / 2,
+          canvas.height,
+          0,
+          1
+        );
+        precisionErrorPass.draw(geometryDrawCount, numInstances, 0, 0);
+        precisionErrorPass.end();
+      }
+    }
+  } else {
+    // depth texture quad
+    for (const m of depthBufferModes) {
+      {
+        depthPrePassDescriptor.depthStencilAttachment.depthClearValue =
+          depthClearValues[m];
+        const depthPrePass = commandEncoder.beginRenderPass(
+          depthPrePassDescriptor
+        );
+        depthPrePass.setPipeline(depthPrePassPipelines[m]);
+        depthPrePass.setBindGroup(0, uniformBindGroups[m]);
+        depthPrePass.setVertexBuffer(0, verticesBuffer);
+        depthPrePass.setViewport(
+          (canvas.width * m) / 2,
+          0,
+          canvas.width / 2,
+          canvas.height,
+          0,
+          1
+        );
+        depthPrePass.draw(geometryDrawCount, numInstances, 0, 0);
+        depthPrePass.end();
+      }
+      {
+        textureQuadPassDescriptors[m].colorAttachments[0].view = attachment;
+        const depthTextureQuadPass = commandEncoder.beginRenderPass(
+          textureQuadPassDescriptors[m]
+        );
+        depthTextureQuadPass.setPipeline(textureQuadPassPipline);
+        depthTextureQuadPass.setBindGroup(0, depthTextureBindGroup);
+        depthTextureQuadPass.setViewport(
+          (canvas.width * m) / 2,
+          0,
+          canvas.width / 2,
+          canvas.height,
+          0,
+          1
+        );
+        depthTextureQuadPass.draw(6);
+        depthTextureQuadPass.end();
+      }
+    }
+  }
+  device.queue.submit([commandEncoder.finish()]);
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/reversedZ/meta.ts b/sample/reversedZ/meta.ts
new file mode 100644
index 00000000..eace7265
--- /dev/null
+++ b/sample/reversedZ/meta.ts
@@ -0,0 +1,24 @@
+export default {
+  name: 'Reversed Z',
+  description: `This example shows the use of the reversed z technique for better utilization of depth buffer precision.
+    The left column uses the regular method, while the right one uses the reversed z technique.
+    Both use depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other.
+    Higher sets are placed further from the camera (and are scaled up for better visibility).
+    To render your scene with reversed z, you will need the depth clear value to be 0.0, the depth compare function to be 'greater',
+    and to remap the depth range by multiplying an additional matrix into your projection matrix.
+    Related reading:
+    https://developer.nvidia.com/content/depth-precision-visualized
+    https://web.archive.org/web/20220724174000/https://thxforthefish.com/posts/reverse_z/
+    `,
+  filename: 'sample/reversedZ',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'vertex.wgsl' },
+    { path: 'fragment.wgsl' },
+    { path: 'vertexDepthPrePass.wgsl' },
+    { path: 'vertexTextureQuad.wgsl' },
+    { path: 'fragmentTextureQuad.wgsl' },
+    { path: 'vertexPrecisionErrorPass.wgsl' },
+    { path: 'fragmentPrecisionErrorPass.wgsl' },
+  ],
+};
diff --git a/src/sample/reversedZ/vertex.wgsl b/sample/reversedZ/vertex.wgsl
similarity index 100%
rename from src/sample/reversedZ/vertex.wgsl
rename to sample/reversedZ/vertex.wgsl
diff --git a/src/sample/reversedZ/vertexDepthPrePass.wgsl b/sample/reversedZ/vertexDepthPrePass.wgsl
similarity index 100%
rename from src/sample/reversedZ/vertexDepthPrePass.wgsl
rename to sample/reversedZ/vertexDepthPrePass.wgsl
diff --git a/src/sample/reversedZ/vertexPrecisionErrorPass.wgsl b/sample/reversedZ/vertexPrecisionErrorPass.wgsl
similarity index 100%
rename from src/sample/reversedZ/vertexPrecisionErrorPass.wgsl
rename to sample/reversedZ/vertexPrecisionErrorPass.wgsl
diff --git a/src/sample/reversedZ/vertexTextureQuad.wgsl b/sample/reversedZ/vertexTextureQuad.wgsl
similarity index 100%
rename from src/sample/reversedZ/vertexTextureQuad.wgsl
rename to sample/reversedZ/vertexTextureQuad.wgsl
diff --git a/sample/rotatingCube/index.html b/sample/rotatingCube/index.html
new file mode 100644
index 00000000..9dd88227
--- /dev/null
+++ b/sample/rotatingCube/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: a-buffer 
+    
+    
+    
+  
+  
+    webgpu-samples: a-buffer 
+    
+    
+    
+  
+  
+     = new Float32Array([
+  // Row 1: Scale by 2
+  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [2, 2, 1]),
+  ...mat4.scale(mat4.identity(), [2, 2, 1]),
+  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [2, 2, 1]),
+  ...mat4.scale(mat4.rotationX(-Math.PI * 0.42), [2, 2, 1]),
+  // Row 2: Scale by 1
+  ...mat4.rotationZ(Math.PI / 16),
+  ...mat4.identity(),
+  ...mat4.rotationX(-Math.PI * 0.3),
+  ...mat4.rotationX(-Math.PI * 0.42),
+  // Row 3: Scale by 0.9
+  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [0.9, 0.9, 1]),
+  ...mat4.scale(mat4.identity(), [0.9, 0.9, 1]),
+  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [0.9, 0.9, 1]),
+  ...mat4.scale(mat4.rotationX(-Math.PI * 0.42), [0.9, 0.9, 1]),
+  // Row 4: Scale by 0.3
+  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [0.3, 0.3, 1]),
+  ...mat4.scale(mat4.identity(), [0.3, 0.3, 1]),
+  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [0.3, 0.3, 1]),
+]);
+
+const canvas = document.querySelector('canvas') as HTMLCanvasElement;
+const adapter = await navigator.gpu.requestAdapter();
+const device = await adapter.requestDevice();
+
+//
+// GUI controls
+//
+
+const kInitConfig = {
+  flangeLogSize: 1.0,
+  highlightFlange: false,
+  animation: 0.1,
+} as const;
+const config = { ...kInitConfig };
+const updateConfigBuffer = () => {
+  const t = (performance.now() / 1000) * 0.5;
+  const data = new Float32Array([
+    Math.cos(t) * config.animation,
+    Math.sin(t) * config.animation,
+    (2 ** config.flangeLogSize - 1) / 2,
+    Number(config.highlightFlange),
+  ]);
+  device.queue.writeBuffer(bufConfig, 64, data);
+};
+
+const kInitSamplerDescriptor = {
+  addressModeU: 'clamp-to-edge',
+  addressModeV: 'clamp-to-edge',
+  magFilter: 'linear',
+  minFilter: 'linear',
+  mipmapFilter: 'linear',
+  lodMinClamp: 0,
+  lodMaxClamp: 4,
+  maxAnisotropy: 1,
+} as const;
+const samplerDescriptor: GPUSamplerDescriptor = { ...kInitSamplerDescriptor };
+
+const gui = new GUI();
+{
+  const buttons = {
+    initial() {
+      Object.assign(config, kInitConfig);
+      Object.assign(samplerDescriptor, kInitSamplerDescriptor);
+      gui.updateDisplay();
+    },
+    checkerboard() {
+      Object.assign(config, { flangeLogSize: 10 });
+      Object.assign(samplerDescriptor, {
+        addressModeU: 'repeat',
+        addressModeV: 'repeat',
+      });
+      gui.updateDisplay();
+    },
+    smooth() {
+      Object.assign(samplerDescriptor, {
+        magFilter: 'linear',
+        minFilter: 'linear',
+        mipmapFilter: 'linear',
+      });
+      gui.updateDisplay();
+    },
+    crunchy() {
+      Object.assign(samplerDescriptor, {
+        magFilter: 'nearest',
+        minFilter: 'nearest',
+        mipmapFilter: 'nearest',
+      });
+      gui.updateDisplay();
+    },
+  };
+  const presets = gui.addFolder('Presets');
+  presets.open();
+  presets.add(buttons, 'initial').name('reset to initial');
+  presets.add(buttons, 'checkerboard').name('checkered floor');
+  presets.add(buttons, 'smooth').name('smooth (linear)');
+  presets.add(buttons, 'crunchy').name('crunchy (nearest)');
+
+  const flangeFold = gui.addFolder('Plane settings');
+  flangeFold.open();
+  flangeFold.add(config, 'flangeLogSize', 0, 10.0, 0.1).name('size = 2**');
+  flangeFold.add(config, 'highlightFlange');
+  flangeFold.add(config, 'animation', 0, 0.5);
+
+  gui.width = 280;
+  {
+    const folder = gui.addFolder('GPUSamplerDescriptor');
+    folder.open();
+
+    const kAddressModes = ['clamp-to-edge', 'repeat', 'mirror-repeat'];
+    folder.add(samplerDescriptor, 'addressModeU', kAddressModes);
+    folder.add(samplerDescriptor, 'addressModeV', kAddressModes);
+
+    const kFilterModes = ['nearest', 'linear'];
+    folder.add(samplerDescriptor, 'magFilter', kFilterModes);
+    folder.add(samplerDescriptor, 'minFilter', kFilterModes);
+    const kMipmapFilterModes = ['nearest', 'linear'] as const;
+    folder.add(samplerDescriptor, 'mipmapFilter', kMipmapFilterModes);
+
+    const ctlMin = folder.add(samplerDescriptor, 'lodMinClamp', 0, 4, 0.1);
+    const ctlMax = folder.add(samplerDescriptor, 'lodMaxClamp', 0, 4, 0.1);
+    ctlMin.onChange((value: number) => {
+      if (samplerDescriptor.lodMaxClamp < value) ctlMax.setValue(value);
+    });
+    ctlMax.onChange((value: number) => {
+      if (samplerDescriptor.lodMinClamp > value) ctlMin.setValue(value);
+    });
+
+    {
+      const folder2 = folder.addFolder(
+        'maxAnisotropy (set only if all "linear")'
+      );
+      folder2.open();
+      const kMaxAnisotropy = 16;
+      folder2.add(samplerDescriptor, 'maxAnisotropy', 1, kMaxAnisotropy, 1);
+    }
+  }
+}
+
+//
+// Canvas setup
+//
+
+// Low-res, pixelated render target so it's easier to see fine details.
+const kCanvasSize = 200;
+const kViewportGridSize = 4;
+const kViewportGridStride = Math.floor(kCanvasSize / kViewportGridSize);
+const kViewportSize = kViewportGridStride - 2;
+
+// The canvas buffer size is 200x200.
+// Compute a canvas CSS size such that there's an integer number of device
+// pixels per canvas pixel ("integer" or "pixel-perfect" scaling).
+// Note the result may be 1 pixel off since ResizeObserver is not used.
+const kCanvasLayoutCSSSize = 600; // set by template styles
+const kCanvasLayoutDevicePixels = kCanvasLayoutCSSSize * devicePixelRatio;
+const kScaleFactor = Math.floor(kCanvasLayoutDevicePixels / kCanvasSize);
+const kCanvasDevicePixels = kScaleFactor * kCanvasSize;
+const kCanvasCSSSize = kCanvasDevicePixels / devicePixelRatio;
+canvas.style.imageRendering = 'pixelated';
+canvas.width = canvas.height = kCanvasSize;
+canvas.style.minWidth = canvas.style.maxWidth = kCanvasCSSSize + 'px';
+const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
+
+const context = canvas.getContext('webgpu') as GPUCanvasContext;
+context.configure({
+  device,
+  format: presentationFormat,
+  alphaMode: 'premultiplied',
+});
+
+//
+// Initialize test texture
+//
+
+// Set up a texture with 4 mip levels, each containing a differently-colored
+// checkerboard with 1x1 pixels (so when rendered the checkerboards are
+// different sizes). This is different from a normal mipmap where each level
+// would look like a lower-resolution version of the previous one.
+// Level 0 is 16x16 white/black
+// Level 1 is 8x8 blue/black
+// Level 2 is 4x4 yellow/black
+// Level 3 is 2x2 pink/black
+const kTextureMipLevels = 4;
+const kTextureBaseSize = 16;
+const checkerboard = device.createTexture({
+  format: 'rgba8unorm',
+  usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+  size: [kTextureBaseSize, kTextureBaseSize],
+  mipLevelCount: 4,
+});
+const checkerboardView = checkerboard.createView();
+
+const kColorForLevel = [
+  [255, 255, 255, 255],
+  [30, 136, 229, 255], // blue
+  [255, 193, 7, 255], // yellow
+  [216, 27, 96, 255], // pink
+];
+for (let mipLevel = 0; mipLevel < kTextureMipLevels; ++mipLevel) {
+  const size = 2 ** (kTextureMipLevels - mipLevel); // 16, 8, 4, 2
+  const data = new Uint8Array(size * size * 4);
+  for (let y = 0; y < size; ++y) {
+    for (let x = 0; x < size; ++x) {
+      data.set(
+        (x + y) % 2 ? kColorForLevel[mipLevel] : [0, 0, 0, 255],
+        (y * size + x) * 4
+      );
+    }
+  }
+  device.queue.writeTexture(
+    { texture: checkerboard, mipLevel },
+    data,
+    { bytesPerRow: size * 4 },
+    [size, size]
+  );
+}
+
+//
+// "Debug" view of the actual texture contents
+//
+
+const showTextureModule = device.createShaderModule({
+  code: showTextureWGSL,
+});
+const showTexturePipeline = device.createRenderPipeline({
+  layout: 'auto',
+  vertex: { module: showTextureModule, entryPoint: 'vmain' },
+  fragment: {
+    module: showTextureModule,
+    entryPoint: 'fmain',
+    targets: [{ format: presentationFormat }],
+  },
+  primitive: { topology: 'triangle-list' },
+});
+
+const showTextureBG = device.createBindGroup({
+  layout: showTexturePipeline.getBindGroupLayout(0),
+  entries: [{ binding: 0, resource: checkerboardView }],
+});
+
+//
+// Pipeline for drawing the test squares
+//
+
+const texturedSquareModule = device.createShaderModule({
+  code: texturedSquareWGSL,
+});
+
+const texturedSquarePipeline = device.createRenderPipeline({
+  layout: 'auto',
+  vertex: {
+    module: texturedSquareModule,
+    entryPoint: 'vmain',
+    constants: { kTextureBaseSize, kViewportSize },
+  },
+  fragment: {
+    module: texturedSquareModule,
+    entryPoint: 'fmain',
+    targets: [{ format: presentationFormat }],
+  },
+  primitive: { topology: 'triangle-list' },
+});
+const texturedSquareBGL = texturedSquarePipeline.getBindGroupLayout(0);
+
+const bufConfig = device.createBuffer({
+  usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
+  size: 128,
+});
+// View-projection matrix set up so it doesn't transform anything at z=0.
+const kCameraDist = 3;
+const viewProj = mat4.translate(
+  mat4.perspective(2 * Math.atan(1 / kCameraDist), 1, 0.1, 100),
+  [0, 0, -kCameraDist]
+);
+device.queue.writeBuffer(bufConfig, 0, viewProj as Float32Array);
+
+const bufMatrices = device.createBuffer({
+  usage: GPUBufferUsage.STORAGE,
+  size: kMatrices.byteLength,
+  mappedAtCreation: true,
+});
+new Float32Array(bufMatrices.getMappedRange()).set(kMatrices);
+bufMatrices.unmap();
+
+function frame() {
+  updateConfigBuffer();
+
+  const sampler = device.createSampler({
+    ...samplerDescriptor,
+    maxAnisotropy:
+      samplerDescriptor.minFilter === 'linear' &&
+      samplerDescriptor.magFilter === 'linear' &&
+      samplerDescriptor.mipmapFilter === 'linear'
+        ? samplerDescriptor.maxAnisotropy
+        : 1,
+  });
+
+  const bindGroup = device.createBindGroup({
+    layout: texturedSquareBGL,
+    entries: [
+      { binding: 0, resource: { buffer: bufConfig } },
+      { binding: 1, resource: { buffer: bufMatrices } },
+      { binding: 2, resource: sampler },
+      { binding: 3, resource: checkerboardView },
+    ],
+  });
+
+  const textureView = context.getCurrentTexture().createView();
+
+  const commandEncoder = device.createCommandEncoder();
+
+  const renderPassDescriptor: GPURenderPassDescriptor = {
+    colorAttachments: [
+      {
+        view: textureView,
+        clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 1.0 },
+        loadOp: 'clear',
+        storeOp: 'store',
+      },
+    ],
+  };
+
+  const pass = commandEncoder.beginRenderPass(renderPassDescriptor);
+  // Draw test squares
+  pass.setPipeline(texturedSquarePipeline);
+  pass.setBindGroup(0, bindGroup);
+  for (let i = 0; i < kViewportGridSize ** 2 - 1; ++i) {
+    const vpX = kViewportGridStride * (i % kViewportGridSize) + 1;
+    const vpY = kViewportGridStride * Math.floor(i / kViewportGridSize) + 1;
+    pass.setViewport(vpX, vpY, kViewportSize, kViewportSize, 0, 1);
+    pass.draw(6, 1, 0, i);
+  }
+  // Show texture contents
+  pass.setPipeline(showTexturePipeline);
+  pass.setBindGroup(0, showTextureBG);
+  const kLastViewport = (kViewportGridSize - 1) * kViewportGridStride + 1;
+  pass.setViewport(kLastViewport, kLastViewport, 32, 32, 0, 1);
+  pass.draw(6, 1, 0, 0);
+  pass.setViewport(kLastViewport + 32, kLastViewport, 16, 16, 0, 1);
+  pass.draw(6, 1, 0, 1);
+  pass.setViewport(kLastViewport + 32, kLastViewport + 16, 8, 8, 0, 1);
+  pass.draw(6, 1, 0, 2);
+  pass.setViewport(kLastViewport + 32, kLastViewport + 24, 4, 4, 0, 1);
+  pass.draw(6, 1, 0, 3);
+  pass.end();
+
+  device.queue.submit([commandEncoder.finish()]);
+  requestAnimationFrame(frame);
+}
+
+requestAnimationFrame(frame);
diff --git a/sample/samplerParameters/meta.ts b/sample/samplerParameters/meta.ts
new file mode 100644
index 00000000..610af91b
--- /dev/null
+++ b/sample/samplerParameters/meta.ts
@@ -0,0 +1,11 @@
+export default {
+  name: 'Sampler Parameters',
+  description:
+    'Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).',
+  filename: 'sample/samplerParameters',
+  sources: [
+    { path: 'main.ts' },
+    { path: './texturedSquare.wgsl' },
+    { path: './showTexture.wgsl' },
+  ],
+};
diff --git a/src/sample/samplerParameters/showTexture.wgsl b/sample/samplerParameters/showTexture.wgsl
similarity index 100%
rename from src/sample/samplerParameters/showTexture.wgsl
rename to sample/samplerParameters/showTexture.wgsl
diff --git a/src/sample/samplerParameters/texturedSquare.wgsl b/sample/samplerParameters/texturedSquare.wgsl
similarity index 100%
rename from src/sample/samplerParameters/texturedSquare.wgsl
rename to sample/samplerParameters/texturedSquare.wgsl
diff --git a/src/sample/shadowMapping/fragment.wgsl b/sample/shadowMapping/fragment.wgsl
similarity index 100%
rename from src/sample/shadowMapping/fragment.wgsl
rename to sample/shadowMapping/fragment.wgsl
diff --git a/sample/shadowMapping/index.html b/sample/shadowMapping/index.html
new file mode 100644
index 00000000..97046f00
--- /dev/null
+++ b/sample/shadowMapping/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: shadowMapping 
+    
+    
+    
+  
+  
+     = [
+  {
+    arrayStride: Float32Array.BYTES_PER_ELEMENT * 6,
+    attributes: [
+      {
+        // position
+        shaderLocation: 0,
+        offset: 0,
+        format: 'float32x3',
+      },
+      {
+        // normal
+        shaderLocation: 1,
+        offset: Float32Array.BYTES_PER_ELEMENT * 3,
+        format: 'float32x3',
+      },
+    ],
+  },
+];
+
+const primitive: GPUPrimitiveState = {
+  topology: 'triangle-list',
+  cullMode: 'back',
+};
+
+const uniformBufferBindGroupLayout = device.createBindGroupLayout({
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.VERTEX,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+  ],
+});
+
+const shadowPipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [
+      uniformBufferBindGroupLayout,
+      uniformBufferBindGroupLayout,
+    ],
+  }),
+  vertex: {
+    module: device.createShaderModule({
+      code: vertexShadowWGSL,
+    }),
+    entryPoint: 'main',
+    buffers: vertexBuffers,
+  },
+  depthStencil: {
+    depthWriteEnabled: true,
+    depthCompare: 'less',
+    format: 'depth32float',
+  },
+  primitive,
+});
+
+// Create a bind group layout which holds the scene uniforms and
+// the texture+sampler for depth. We create it manually because the WebGPU
+// implementation doesn't infer this from the shader (yet).
+const bglForRender = device.createBindGroupLayout({
+  entries: [
+    {
+      binding: 0,
+      visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
+      buffer: {
+        type: 'uniform',
+      },
+    },
+    {
+      binding: 1,
+      visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
+      texture: {
+        sampleType: 'depth',
+      },
+    },
+    {
+      binding: 2,
+      visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
+      sampler: {
+        type: 'comparison',
+      },
+    },
+  ],
+});
+
+const pipeline = device.createRenderPipeline({
+  layout: device.createPipelineLayout({
+    bindGroupLayouts: [bglForRender, uniformBufferBindGroupLayout],
+  }),
+  vertex: {
+    module: device.createShaderModule({
+      code: vertexWGSL,
+    }),
+    entryPoint: 'main',
+    buffers: vertexBuffers,
+  },
+  fragment: {
+    module: device.createShaderModule({
+      code: fragmentWGSL,
+    }),
+    entryPoint: 'main',
+    targets: [
+      {
+        format: presentationFormat,
+      },
+    ],
+    constants: {
+      shadowDepthTextureSize,
+    },
+  },
+  depthStencil: {
+    depthWriteEnabled: true,
+    depthCompare: 'less',
+    format: 'depth24plus-stencil8',
+  },
+  primitive,
+});
+
+const depthTexture = device.createTexture({
+  size: [canvas.width, canvas.height],
+  format: 'depth24plus-stencil8',
+  usage: GPUTextureUsage.RENDER_ATTACHMENT,
+});
+
+const renderPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [
+    {
+      // view is acquired and set in render loop.
+      view: undefined,
+
+      clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+      loadOp: 'clear',
+      storeOp: 'store',
+    },
+  ],
+  depthStencilAttachment: {
+    view: depthTexture.createView(),
+
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+    stencilClearValue: 0,
+    stencilLoadOp: 'clear',
+    stencilStoreOp: 'store',
+  },
+};
+
+const modelUniformBuffer = device.createBuffer({
+  size: 4 * 16, // 4x4 matrix
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const sceneUniformBuffer = device.createBuffer({
+  // Two 4x4 viewProj matrices,
+  // one for the camera and one for the light.
+  // Then a vec3 for the light position.
+  // Rounded to the nearest multiple of 16.
+  size: 2 * 4 * 16 + 4 * 4,
+  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+const sceneBindGroupForShadow = device.createBindGroup({
+  layout: uniformBufferBindGroupLayout,
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: sceneUniformBuffer,
+      },
+    },
+  ],
+});
+
+const sceneBindGroupForRender = device.createBindGroup({
+  layout: bglForRender,
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: sceneUniformBuffer,
+      },
+    },
+    {
+      binding: 1,
+      resource: shadowDepthTextureView,
+    },
+    {
+      binding: 2,
+      resource: device.createSampler({
+        compare: 'less',
+      }),
+    },
+  ],
+});
+
+const modelBindGroup = device.createBindGroup({
+  layout: uniformBufferBindGroupLayout,
+  entries: [
+    {
+      binding: 0,
+      resource: {
+        buffer: modelUniformBuffer,
+      },
+    },
+  ],
+});
+
+const eyePosition = vec3.fromValues(0, 50, -100);
+const upVector = vec3.fromValues(0, 1, 0);
+const origin = vec3.fromValues(0, 0, 0);
+
+const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 2000.0);
+
+const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
+
+const lightPosition = vec3.fromValues(50, 100, -100);
+const lightViewMatrix = mat4.lookAt(lightPosition, origin, upVector);
+const lightProjectionMatrix = mat4.create();
+{
+  const left = -80;
+  const right = 80;
+  const bottom = -80;
+  const top = 80;
+  const near = -200;
+  const far = 300;
+  mat4.ortho(left, right, bottom, top, near, far, lightProjectionMatrix);
+}
+
+const lightViewProjMatrix = mat4.multiply(
+  lightProjectionMatrix,
+  lightViewMatrix
+);
+
+const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);
+
+// Move the model so it's centered.
+const modelMatrix = mat4.translation([0, -45, 0]);
+
+// The camera/light aren't moving, so write them into buffers now.
+{
+  const lightMatrixData = lightViewProjMatrix as Float32Array;
+  device.queue.writeBuffer(
+    sceneUniformBuffer,
+    0,
+    lightMatrixData.buffer,
+    lightMatrixData.byteOffset,
+    lightMatrixData.byteLength
+  );
+
+  const cameraMatrixData = viewProjMatrix as Float32Array;
+  device.queue.writeBuffer(
+    sceneUniformBuffer,
+    64,
+    cameraMatrixData.buffer,
+    cameraMatrixData.byteOffset,
+    cameraMatrixData.byteLength
+  );
+
+  const lightData = lightPosition as Float32Array;
+  device.queue.writeBuffer(
+    sceneUniformBuffer,
+    128,
+    lightData.buffer,
+    lightData.byteOffset,
+    lightData.byteLength
+  );
+
+  const modelData = modelMatrix as Float32Array;
+  device.queue.writeBuffer(
+    modelUniformBuffer,
+    0,
+    modelData.buffer,
+    modelData.byteOffset,
+    modelData.byteLength
+  );
+}
+
+// Rotates the camera around the origin based on time.
+function getCameraViewProjMatrix() {
+  const eyePosition = vec3.fromValues(0, 50, -100);
+
+  const rad = Math.PI * (Date.now() / 2000);
+  const rotation = mat4.rotateY(mat4.translation(origin), rad);
+  vec3.transformMat4(eyePosition, rotation, eyePosition);
+
+  const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
+
+  mat4.multiply(projectionMatrix, viewMatrix, viewProjMatrix);
+  return viewProjMatrix as Float32Array;
+}
+
+const shadowPassDescriptor: GPURenderPassDescriptor = {
+  colorAttachments: [],
+  depthStencilAttachment: {
+    view: shadowDepthTextureView,
+    depthClearValue: 1.0,
+    depthLoadOp: 'clear',
+    depthStoreOp: 'store',
+  },
+};
+
+function frame() {
+  const cameraViewProj = getCameraViewProjMatrix();
+  device.queue.writeBuffer(
+    sceneUniformBuffer,
+    64,
+    cameraViewProj.buffer,
+    cameraViewProj.byteOffset,
+    cameraViewProj.byteLength
+  );
+
+  renderPassDescriptor.colorAttachments[0].view = context
+    .getCurrentTexture()
+    .createView();
+
+  const commandEncoder = device.createCommandEncoder();
+  {
+    const shadowPass = commandEncoder.beginRenderPass(shadowPassDescriptor);
+    shadowPass.setPipeline(shadowPipeline);
+    shadowPass.setBindGroup(0, sceneBindGroupForShadow);
+    shadowPass.setBindGroup(1, modelBindGroup);
+    shadowPass.setVertexBuffer(0, vertexBuffer);
+    shadowPass.setIndexBuffer(indexBuffer, 'uint16');
+    shadowPass.drawIndexed(indexCount);
+
+    shadowPass.end();
+  }
+  {
+    const renderPass = commandEncoder.beginRenderPass(renderPassDescriptor);
+    renderPass.setPipeline(pipeline);
+    renderPass.setBindGroup(0, sceneBindGroupForRender);
+    renderPass.setBindGroup(1, modelBindGroup);
+    renderPass.setVertexBuffer(0, vertexBuffer);
+    renderPass.setIndexBuffer(indexBuffer, 'uint16');
+    renderPass.drawIndexed(indexCount);
+
+    renderPass.end();
+  }
+  device.queue.submit([commandEncoder.finish()]);
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/shadowMapping/meta.ts b/sample/shadowMapping/meta.ts
new file mode 100644
index 00000000..cc958524
--- /dev/null
+++ b/sample/shadowMapping/meta.ts
@@ -0,0 +1,12 @@
+export default {
+  name: 'Shadow Mapping',
+  description:
+    'This example shows how to sample from a depth texture to render shadows.',
+  filename: 'sample/shadowMapping',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'vertexShadow.wgsl' },
+    { path: 'vertex.wgsl' },
+    { path: 'fragment.wgsl' },
+  ],
+};
diff --git a/src/sample/shadowMapping/vertex.wgsl b/sample/shadowMapping/vertex.wgsl
similarity index 100%
rename from src/sample/shadowMapping/vertex.wgsl
rename to sample/shadowMapping/vertex.wgsl
diff --git a/src/sample/shadowMapping/vertexShadow.wgsl b/sample/shadowMapping/vertexShadow.wgsl
similarity index 100%
rename from src/sample/shadowMapping/vertexShadow.wgsl
rename to sample/shadowMapping/vertexShadow.wgsl
diff --git a/src/sample/skinnedMesh/glbUtils.ts b/sample/skinnedMesh/glbUtils.ts
similarity index 99%
rename from src/sample/skinnedMesh/glbUtils.ts
rename to sample/skinnedMesh/glbUtils.ts
index 30b8c760..c2c8643c 100644
--- a/src/sample/skinnedMesh/glbUtils.ts
+++ b/sample/skinnedMesh/glbUtils.ts
@@ -1,4 +1,4 @@
-import { Quat } from 'wgpu-matrix/dist/2.x/quat';
+import { Quat } from 'wgpu-matrix';
 import { Accessor, BufferView, GlTf, Scene } from './gltf';
 import { Mat4, Vec3, mat4 } from 'wgpu-matrix';
 
@@ -821,7 +821,6 @@ export const convertGLBToJSONAndBinary = async (
     new TextDecoder('utf-8').decode(new Uint8Array(buffer, 20, jsonChunkLength))
   );
 
-  console.log(jsonChunk);
   // Binary data located after jsonChunk
   const binaryHeader = new Uint32Array(buffer, 20 + jsonChunkLength, 2);
   validateBinaryHeader(binaryHeader);
diff --git a/src/sample/skinnedMesh/gltf.ts b/sample/skinnedMesh/gltf.ts
similarity index 98%
rename from src/sample/skinnedMesh/gltf.ts
rename to sample/skinnedMesh/gltf.ts
index d5325f22..a3d46b10 100644
--- a/src/sample/skinnedMesh/gltf.ts
+++ b/sample/skinnedMesh/gltf.ts
@@ -1,6 +1,8 @@
 import { Mat4 } from 'wgpu-matrix';
 import { GLTFNode } from './glbUtils';
 
+/* eslint @typescript-eslint/no-explicit-any: "off" */
+
 /* Sourced from https://github.com/bwasty/gltf-loader-ts/blob/master/source/gltf.ts */
 /* License for use can be found here: https://github.com/bwasty/gltf-loader-ts/blob/master/LICENSE */
 /* Comments and types have been excluded from original source for sake of cleanliness and brevity */
diff --git a/src/sample/skinnedMesh/gltf.wgsl b/sample/skinnedMesh/gltf.wgsl
similarity index 100%
rename from src/sample/skinnedMesh/gltf.wgsl
rename to sample/skinnedMesh/gltf.wgsl
diff --git a/src/sample/skinnedMesh/grid.wgsl b/sample/skinnedMesh/grid.wgsl
similarity index 100%
rename from src/sample/skinnedMesh/grid.wgsl
rename to sample/skinnedMesh/grid.wgsl
diff --git a/src/sample/skinnedMesh/gridData.ts b/sample/skinnedMesh/gridData.ts
similarity index 100%
rename from src/sample/skinnedMesh/gridData.ts
rename to sample/skinnedMesh/gridData.ts
diff --git a/src/sample/skinnedMesh/gridUtils.ts b/sample/skinnedMesh/gridUtils.ts
similarity index 100%
rename from src/sample/skinnedMesh/gridUtils.ts
rename to sample/skinnedMesh/gridUtils.ts
diff --git a/sample/skinnedMesh/index.html b/sample/skinnedMesh/index.html
new file mode 100644
index 00000000..3645b76e
--- /dev/null
+++ b/sample/skinnedMesh/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: skinnedMesh 
+    
+    
+    
+  
+  
+    ();
+const animWhaleSkin = (skin: GLTFSkin, angle: number) => {
+  for (let i = 0; i < skin.joints.length; i++) {
+    // Index into the current joint
+    const joint = skin.joints[i];
+    // If our map does
+    if (!origMatrices.has(joint)) {
+      origMatrices.set(joint, whaleScene.nodes[joint].source.getMatrix());
+    }
+    // Get the original position, rotation, and scale of the current joint
+    const origMatrix = origMatrices.get(joint);
+    let m = mat4.create();
+    // Depending on which bone we are accessing, apply a specific rotation to the bone's original
+    // transformation to animate it
+    if (joint === 1 || joint === 0) {
+      m = mat4.rotateY(origMatrix, -angle);
+    } else if (joint === 3 || joint === 4) {
+      m = mat4.rotateX(origMatrix, joint === 3 ? angle : -angle);
+    } else {
+      m = mat4.rotateZ(origMatrix, angle);
+    }
+    // Apply the current transformation to the transform values within the relevant nodes
+    // (these nodes, of course, each being nodes that represent joints/bones)
+    whaleScene.nodes[joint].source.position = mat4.getTranslation(m);
+    whaleScene.nodes[joint].source.scale = mat4.getScaling(m);
+    whaleScene.nodes[joint].source.rotation = getRotation(m);
+  }
+};
+
+function frame() {
+  // Calculate camera matrices
+  const projectionMatrix = getProjectionMatrix();
+  const viewMatrix = getViewMatrix();
+  const modelMatrix = getModelMatrix();
+
+  // Calculate bone transformation
+  const t = (Date.now() / 20000) * settings.speed;
+  const angle = Math.sin(t) * settings.angle;
+  // Compute Transforms when angle is applied
+  animSkinnedGrid(gridBoneCollection.transforms, angle);
+
+  // Write to mvp to camera buffer
+  device.queue.writeBuffer(
+    cameraBuffer,
+    0,
+    projectionMatrix.buffer,
+    projectionMatrix.byteOffset,
+    projectionMatrix.byteLength
+  );
+
+  device.queue.writeBuffer(
+    cameraBuffer,
+    64,
+    viewMatrix.buffer,
+    viewMatrix.byteOffset,
+    viewMatrix.byteLength
+  );
+
+  device.queue.writeBuffer(
+    cameraBuffer,
+    128,
+    modelMatrix.buffer,
+    modelMatrix.byteOffset,
+    modelMatrix.byteLength
+  );
+
+  // Write to skinned grid bone uniform buffer
+  for (let i = 0; i < gridBoneCollection.transforms.length; i++) {
+    device.queue.writeBuffer(
+      skinnedGridJointUniformBuffer,
+      i * 64,
+      gridBoneCollection.transforms[i] as Float32Array
+    );
+  }
+
+  // Difference between these two render passes is just the presence of depthTexture
+  gltfRenderPassDescriptor.colorAttachments[0].view = context
+    .getCurrentTexture()
+    .createView();
+
+  skinnedGridRenderPassDescriptor.colorAttachments[0].view = context
+    .getCurrentTexture()
+    .createView();
+
+  // Update node matrixes
+  for (const scene of whaleScene.scenes) {
+    scene.root.updateWorldMatrix(device);
+  }
+
+  // Updates skins (we index into skins in the renderer, which is not the best approach but hey)
+  animWhaleSkin(whaleScene.skins[0], Math.sin(t) * settings.angle);
+  // Node 6 should be the only node with a drawable mesh so hopefully this works fine
+  whaleScene.skins[0].update(device, 6, whaleScene.nodes);
+
+  const commandEncoder = device.createCommandEncoder();
+  if (settings.object === 'Whale') {
+    const passEncoder = commandEncoder.beginRenderPass(
+      gltfRenderPassDescriptor
+    );
+    for (const scene of whaleScene.scenes) {
+      scene.root.renderDrawables(passEncoder, [
+        cameraBGCluster.bindGroups[0],
+        generalUniformsBGCLuster.bindGroups[0],
+      ]);
+    }
+    passEncoder.end();
+  } else {
+    // Our skinned grid isn't checking for depth, so we pass it
+    // a separate render descriptor that does not take in a depth texture
+    const passEncoder = commandEncoder.beginRenderPass(
+      skinnedGridRenderPassDescriptor
+    );
+    passEncoder.setPipeline(skinnedGridPipeline);
+    passEncoder.setBindGroup(0, cameraBGCluster.bindGroups[0]);
+    passEncoder.setBindGroup(1, generalUniformsBGCLuster.bindGroups[0]);
+    passEncoder.setBindGroup(2, skinnedGridBoneBGCluster.bindGroups[0]);
+    // Pass in vertex and index buffers generated from our static skinned grid
+    // data at ./gridData.ts
+    passEncoder.setVertexBuffer(0, skinnedGridVertexBuffers.positions);
+    passEncoder.setVertexBuffer(1, skinnedGridVertexBuffers.joints);
+    passEncoder.setVertexBuffer(2, skinnedGridVertexBuffers.weights);
+    passEncoder.setIndexBuffer(skinnedGridVertexBuffers.indices, 'uint16');
+    passEncoder.drawIndexed(gridIndices.length, 1);
+    passEncoder.end();
+  }
+
+  device.queue.submit([commandEncoder.finish()]);
+
+  requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/skinnedMesh/meta.ts b/sample/skinnedMesh/meta.ts
new file mode 100644
index 00000000..65843c88
--- /dev/null
+++ b/sample/skinnedMesh/meta.ts
@@ -0,0 +1,15 @@
+export default {
+  name: 'Skinned Mesh',
+  description:
+    'A demonstration of basic gltf loading and mesh skinning, ported from https://webgl2fundamentals.org/webgl/lessons/webgl-skinning.html. Mesh data, per vertex attributes, and skin inverseBindMatrices are taken from the json parsed from the binary output of the .glb file. Animations are generated progrmatically, with animated joint matrices updated and passed to shaders per frame via uniform buffers.',
+  filename: 'sample/skinnedMesh',
+  sources: [
+    { path: 'main.ts' },
+    { path: 'gridData.ts' },
+    { path: 'gridUtils.ts' },
+    { path: 'grid.wgsl' },
+    { path: 'gltf.ts' },
+    { path: 'glbUtils.ts' },
+    { path: 'gltf.wgsl' },
+  ],
+};
diff --git a/sample/texturedCube/index.html b/sample/texturedCube/index.html
new file mode 100644
index 00000000..4e1eb2f5
--- /dev/null
+++ b/sample/texturedCube/index.html
@@ -0,0 +1,27 @@
+
+
+  
+    webgpu-samples: texturedCube 
+    
+    
+    
+  
+  
+    webgpu-samples: twoCubes 
+    
+    
+    
+  
+  
+    webgpu-samples: videoUploading 
+    
+    
+    
+  
+  
+    webgpu-samples: worker 
+    
+    
+    
+  
+  
+     {
   return vec4(1.0, 0.0, 0.0, 1.0);
-}
+}
\ No newline at end of file
diff --git a/src/shaders/sampleExternalTexture.frag.wgsl b/shaders/sampleExternalTexture.frag.wgsl
similarity index 100%
rename from src/shaders/sampleExternalTexture.frag.wgsl
rename to shaders/sampleExternalTexture.frag.wgsl
diff --git a/src/shaders/triangle.vert.wgsl b/shaders/triangle.vert.wgsl
similarity index 100%
rename from src/shaders/triangle.vert.wgsl
rename to shaders/triangle.vert.wgsl
diff --git a/src/shaders/vertexPositionColor.frag.wgsl b/shaders/vertexPositionColor.frag.wgsl
similarity index 100%
rename from src/shaders/vertexPositionColor.frag.wgsl
rename to shaders/vertexPositionColor.frag.wgsl
diff --git a/src/components/SampleCategory.module.css b/src/components/SampleCategory.module.css
deleted file mode 100644
index e1f2854e..00000000
--- a/src/components/SampleCategory.module.css
+++ /dev/null
@@ -1,28 +0,0 @@
-.sampleCategory {
-  margin-top: 5px;
-  margin-bottom: 5px;
-  display: inline-block;
-}
-
-.sampleCategoryDescription {
-  position: absolute;
-  background-color: rgba(255, 255, 255, 0.9);
-  box-shadow: 0 0 5px 10px rgba(255, 255, 255, 0.9);
-  border-radius: 10px;
-  transition: opacity 0.3s ease-in, transform 0.2s ease-out
-}
-
-.sampleCategoryDescription[data-active='true'] {
-  opacity: 1;
-  transform: translateY(-0.5em);
-}
-
-.sampleCategoryDescription[data-active='false'] {
-  opacity: 0;
-  pointer-events: none;
-  transform: translateY(0.25em);
-}
-
-li.selected a {
-  color: #ff0000;
-}
\ No newline at end of file
diff --git a/src/components/SampleCategory.tsx b/src/components/SampleCategory.tsx
deleted file mode 100644
index bf83532f..00000000
--- a/src/components/SampleCategory.tsx
+++ /dev/null
@@ -1,102 +0,0 @@
-import styles from './SampleCategory.module.css';
-import { useState } from 'react';
-
-import { NextRouter } from 'next/router';
-import Link from 'next/link';
-import { PageCategory } from '../pages/samples/[slug]';
-
-type PageType = {
-  [key: string]: React.ComponentType & { render: { preload: () => void } };
-};
-
-type PageComponentType = {
-  [key: string]: React.ComponentType;
-};
-
-interface SampleCategoryProps {
-  category: PageCategory;
-  router: NextRouter;
-  onClickPageLink: () => void;
-}
-
-export const SampleCategory = ({
-  category,
-  onClickPageLink,
-  router,
-}: SampleCategoryProps) => {
-  const [displayDescription, setDisplayDescription] = useState(false);
-  const { title, pages, sampleNames } = category;
-  return (
-    
-      
 {
-          setDisplayDescription(true);
-        }}
-        onMouseLeave={() => {
-          setDisplayDescription(false);
-        }}
-      >
-        
-          {title}
-         
-
-        
-          {category.description}
-        
-      
-      {sampleNames.map((slug) => {
-        return (
-          
 onClickPageLink()}
-          />
-        );
-      })}
-      {
-        (pages as PageType)[slug].render.preload();
-      }}
-    >
-       
-  );
-};
diff --git a/src/components/SampleLayout.tsx b/src/components/SampleLayout.tsx
deleted file mode 100644
index db39d5e4..00000000
--- a/src/components/SampleLayout.tsx
+++ /dev/null
@@ -1,296 +0,0 @@
-import Head from 'next/head';
-import { useRouter } from 'next/router';
-import { useEffect, useMemo, useRef, useState } from 'react';
-
-import type { GUI } from 'dat.gui';
-import type { Stats } from 'stats-js';
-import type { Editor, EditorConfiguration } from 'codemirror';
-interface CodeMirrorEditor extends Editor {
-  updatedSource: (source: string) => void;
-}
-
-import styles from './SampleLayout.module.css';
-
-type SourceFileInfo = {
-  name: string;
-  contents: string;
-  editable?: boolean;
-};
-
-export type SampleInit = (params: {
-  canvas: HTMLCanvasElement;
-  pageState: { active: boolean };
-  gui?: GUI;
-  stats?: Stats;
-}) => void | Promise;
-
-if (process.browser) {
-  require('codemirror/mode/javascript/javascript');
-}
-
-function makeCodeMirrorEditor(source: string) {
-  const configuration: EditorConfiguration = {
-    lineNumbers: true,
-    lineWrapping: true,
-    theme: 'monokai',
-    readOnly: true,
-  };
-
-  let el: HTMLDivElement | null = null;
-  let editor: CodeMirrorEditor;
-
-  if (process.browser) {
-    el = document.createElement('div');
-    const CodeMirror = process.browser && require('codemirror');
-    editor = CodeMirror(el, configuration);
-  }
-
-  function Container(props: React.ComponentProps<'div'>) {
-    return (
-      
-        
 {
-            if (el && div) {
-              div.appendChild(el);
-              editor.setOption('value', source);
-            }
-          }}
-        />
-      
-    );
-  }
-  return {
-    Container,
-  };
-}
-
-const SampleLayout: React.FunctionComponent<
-  React.PropsWithChildren<{
-    name: string;
-    description: string;
-    originTrial?: string;
-    filename: string;
-    gui?: boolean;
-    stats?: boolean;
-    init: SampleInit;
-    sources: SourceFileInfo[];
-  }>
-> = (props) => {
-  const canvasRef = useRef
(null);
-  const navRef = useRef(null);
-  const sources = useMemo(
-    () =>
-      props.sources.map(({ name, contents }) => {
-        return { name, ...makeCodeMirrorEditor(contents) };
-      }),
-    props.sources
-  );
-
-  const guiParentRef = useRef(null);
-  const gui: GUI | undefined = useMemo(() => {
-    if (props.gui && process.browser) {
-      // eslint-disable-next-line @typescript-eslint/no-var-requires
-      const dat = require('dat.gui');
-      const gui = new dat.GUI({ autoPlace: false });
-      // HACK: Make
-      gui.domElement.style.position = 'relative';
-      gui.domElement.style.zIndex = '1000';
-      return gui;
-    }
-    return undefined;
-  }, []);
-
-  const statsParentRef = useRef(null);
-  const stats: Stats | undefined = useMemo(() => {
-    if (props.stats && process.browser) {
-      // eslint-disable-next-line @typescript-eslint/no-var-requires
-      const Stats = require('stats-js');
-      return new Stats();
-    }
-    return undefined;
-  }, []);
-
-  const router = useRouter();
-  const currentHash = router.asPath.match(/#([a-zA-Z0-9\.\/]+)/);
-
-  const [error, setError] = useState(null);
-
-  const [activeHash, setActiveHash] = useState(null);
-  useEffect(() => {
-    if (currentHash) {
-      setActiveHash(currentHash[1]);
-    } else {
-      setActiveHash(sources[0].name);
-    }
-
-    if (gui && guiParentRef.current) {
-      guiParentRef.current.appendChild(gui.domElement);
-
-      // HACK: useEffect() is sometimes called twice, resulting in the GUI being populated twice.
-      // Erase any existing controllers before calling init() on the sample.
-      while (gui.__controllers.length > 0) {
-        gui.__controllers[0].remove();
-      }
-    }
-
-    if (stats && statsParentRef.current) {
-      stats.dom.style.position = 'absolute';
-      stats.showPanel(1); // 0: fps, 1: ms, 2: mb, 3+: custom
-      statsParentRef.current.appendChild(stats.dom);
-    }
-
-    const pageState = {
-      active: true,
-    };
-    const cleanup = () => {
-      pageState.active = false;
-    };
-    try {
-      const canvas = canvasRef.current;
-      if (!canvas) {
-        throw new Error('The canvas is not available');
-      }
-      const p = props.init({
-        canvas,
-        pageState,
-        gui,
-        stats,
-      });
-
-      if (p instanceof Promise) {
-        p.catch((err: Error) => {
-          console.error(err);
-          setError(err);
-        });
-      }
-    } catch (err) {
-      console.error(err);
-      setError(err);
-    }
-    return cleanup;
-  }, []);
-
-  return (
-    
-      
-        
-        {`${props.name} - WebGPU Samples`} 
-        
-        
{props.name} 
-        
-          See it on Github!
-         
-        
{props.description}
-        {error ? (
-          <>
-            
-              Something went wrong. Do your browser and device support WebGPU?
-            
-            
{`${error}`}
-          >
-        ) : null}
-      
-        
-           {
-              const element = event.currentTarget;
-              const scrollRight =
-                element.scrollWidth - element.clientWidth - element.scrollLeft;
-              if (element.scrollLeft > 25) {
-                navRef.current.setAttribute('data-left', 'true');
-              } else {
-                navRef.current.setAttribute('data-left', 'false');
-              }
-              if (scrollRight > 25) {
-                navRef.current.setAttribute('data-right', 'true');
-              } else {
-                navRef.current.setAttribute('data-right', 'false');
-              }
-            }}
-          >
-            
-          
 
-        {sources.map((src, i) => {
-          return (
-            
-          );
-        })}
-      
 
-  );
-};
-
-export default SampleLayout;
-
-export const makeSample: (
-  ...props: Parameters
-) => JSX.Element = (props) => {
-  return ();
+
+// Generate the list of samples
+for (const { title, description, samples } of pageCategories) {
+  for (const [key, sampleInfo] of Object.entries(samples)) {
+    samplesByKey.set(key, sampleInfo);
+  }
+
+  sampleListElem.appendChild(
+    el('ul', { className: 'exampleList' }, [
+      el('div', {}, [
+        el('div', { className: 'sampleCategory' }, [
+          el('h3', {
+            style: { 'margin-top': '5px' },
+            textContent: title,
+            dataset: { tooltip: description },
+          }),
+        ]),
+        ...Object.entries(samples).map(([key, sampleInfo]) =>
+          el('li', {}, [
+            el('a', {
+              href: sampleInfo.filename,
+              onClick: (e: PointerEvent) => {
+                setSampleIFrameURL(e, sampleInfo);
+              },
+              textContent: sampleInfo.tocName || key,
+            }),
+          ])
+        ),
+      ]),
+    ])
+  );
+}
+
+/**
+ * Parse the page's current URL and then set the iframe appropriately.
+ */
+function parseURL() {
+  const url = new URL(location.toString());
+
+  const sample = url.searchParams.get('sample') || '';
+  const sampleUrl = new URL(sample, location.href);
+  const sampleInfo = samplesByKey.get(basename(sampleUrl.pathname));
+  setSampleIFrame(sampleInfo, sampleUrl.search);
+  if (sampleInfo) {
+    const hash = basename(url.hash.substring(1));
+    const sourceInfo =
+      sampleInfo.sources.find(({ path }) => basename(path) === hash) ||
+      sampleInfo.sources[0];
+    setSourceTab(sourceInfo);
+  }
+}
+
+/**
+ * Respond to messages from iframes. We have no way of knowing the size
+ * of an example so there's a helper in `iframe-helper.js` that lets
+ * the iframe tell us the size it needs (and possibly other things).
+ * This lets us adjust the size of the iframe.
+ */
+window.addEventListener('message', (e) => {
+  const { cmd, data } = e.data;
+  switch (cmd) {
+    case 'resize': {
+      sampleContainerElem.style.height = `${data.height}px`;
+      break;
+    }
+    default:
+      throw new Error(`unknown message cmd: ${cmd}`);
+  }
+});
+
+// Parse the first URL.
+parseURL();
diff --git a/src/pages/_app.tsx b/src/pages/_app.tsx
deleted file mode 100644
index a44ec918..00000000
--- a/src/pages/_app.tsx
+++ /dev/null
@@ -1,105 +0,0 @@
-import Head from 'next/head';
-import { AppProps } from 'next/app';
-import Link from 'next/link';
-import { useRouter } from 'next/router';
-import { useMemo, memo, useState } from 'react';
-
-import './styles.css';
-import styles from './MainLayout.module.css';
-
-import { pageCategories } from './samples/[slug]';
-import { SampleCategory } from '../components/SampleCategory';
-
-const title = 'WebGPU Samples';
-
-const MainLayout: React.FunctionComponent = ({
-  Component,
-  pageProps,
-}) => {
-  const router = useRouter();
-  const [listExpanded, setListExpanded] = useState(false);
-
-  const ComponentMemo = useMemo(() => {
-    return memo(Component);
-  }, [Component]);
-
-  const oldPathSyntaxMatch = router.asPath.match(/(\?wgsl=[01])#(\S+)/);
-  if (oldPathSyntaxMatch) {
-    const slug = oldPathSyntaxMatch[2];
-    router.replace(`/samples/${slug}`);
-    return <>>;
-  }
-
-  return (
-    <>
-      
-        {title} 
-        
-        
-          
-              {
-                setListExpanded(!listExpanded);
-              }}
-            >
-          
-          
-            
-              Github
-             
-            
-            {pageCategories.map((category) => {
-              return (
-                
-                   setListExpanded(false)}
-                  />
-                  
-              );
-            })}
-            
-            
Other Pages 
-            
-          
 
-        
-      
-      
-        The WebGPU Samples are a set of samples and demos demonstrating the use
-        of the WebGPU API . Please see the current
-        implementation status and how to run WebGPU in your browser at{' '}
-        webgpu.io .
-      
-     
-  );
-};
-
-export default HomePage;
diff --git a/src/pages/samples/[slug].tsx b/src/pages/samples/[slug].tsx
deleted file mode 100644
index ab03cede..00000000
--- a/src/pages/samples/[slug].tsx
+++ /dev/null
@@ -1,174 +0,0 @@
-import dynamic from 'next/dynamic';
-import { GetStaticPaths, GetStaticProps } from 'next';
-
-type PathParams = {
-  slug: string;
-};
-
-type Props = {
-  slug: string;
-};
-
-type PageComponentType = {
-  [key: string]: React.ComponentType;
-};
-
-// Samples that implement basic rendering functionality using the WebGPU API.
-const graphicsBasicsPages: PageComponentType = {
-  helloTriangle: dynamic(() => import('../../sample/helloTriangle/main')),
-  helloTriangleMSAA: dynamic(
-    () => import('../../sample/helloTriangleMSAA/main')
-  ),
-  rotatingCube: dynamic(() => import('../../sample/rotatingCube/main')),
-  twoCubes: dynamic(() => import('../../sample/twoCubes/main')),
-  texturedCube: dynamic(() => import('../../sample/texturedCube/main')),
-  samplerParameters: dynamic(
-    () => import('../../sample/samplerParameters/main')
-  ),
-  instancedCube: dynamic(() => import('../../sample/instancedCube/main')),
-  fractalCube: dynamic(() => import('../../sample/fractalCube/main')),
-  cubemap: dynamic(() => import('../../sample/cubemap/main')),
-};
-
-// Samples that demonstrate functionality specific to WebGPU, or demonstrate the particularities
-// of how WebGPU implements a particular feature within its api. For instance, while many of the
-// sampler parameters in the 'samplerParameters' sample have direct analogues in other graphics api,
-// the primary purpose of 'sampleParameters' is to demonstrate their specific nomenclature and
-// functionality within the context of the WebGPU API.
-const webGPUFeaturesPages: PageComponentType = {
-  reversedZ: dynamic(() => import('../../sample/reversedZ/main')),
-  renderBundles: dynamic(() => import('../../sample/renderBundles/main')),
-};
-
-// A selection of samples demonstrating various graphics techniques, utilizing various features
-// of the WebGPU API, and often executing render and compute pipelines in tandem to achieve their
-// visual results. The techniques demonstrated may even be independent of WebGPU (e.g. 'cameras')
-const graphicsDemoPages: PageComponentType = {
-  cameras: dynamic(() => import('../../sample/cameras/main')),
-  normalMap: dynamic(() => import('../../sample/normalMap/main')),
-  shadowMapping: dynamic(() => import('../../sample/shadowMapping/main')),
-  deferredRendering: dynamic(
-    () => import('../../sample/deferredRendering/main')
-  ),
-  particles: dynamic(() => import('../../sample/particles/main')),
-  imageBlur: dynamic(() => import('../../sample/imageBlur/main')),
-  cornell: dynamic(() => import('../../sample/cornell/main')),
-  'A-buffer': dynamic(() => import('../../sample/a-buffer/main')),
-  skinnedMesh: dynamic(() => import('../../sample/skinnedMesh/main')),
-};
-
-// Samples that demonstrate the GPGPU functionality of WebGPU. These samples generally provide some
-// user-facing representation (e.g. image, text, or audio) of the result of compute operations.
-// Any rendering code is primarily for visualization, not key to the unique part of the sample;
-// rendering could also be done using canvas2D without detracting from the sample's usefulness.
-const gpuComputeDemoPages: PageComponentType = {
-  computeBoids: dynamic(() => import('../../sample/computeBoids/main')),
-  gameOfLife: dynamic(() => import('../../sample/gameOfLife/main')),
-  bitonicSort: dynamic(() => import('../../sample/bitonicSort/main')),
-};
-
-// Samples that demonstrate how to integrate WebGPU and/or WebGPU render operations with other
-// functionalities provided by the web platform.
-const webPlatformPages: PageComponentType = {
-  resizeCanvas: dynamic(() => import('../../sample/resizeCanvas/main')),
-  videoUploading: dynamic(() => import('../../sample/videoUploading/main')),
-  worker: dynamic(() => import('../../sample/worker/main')),
-};
-
-// Samples whose primary purpose is to benchmark WebGPU performance.
-const benchmarkPages: PageComponentType = {
-  animometer: dynamic(() => import('../../sample/animometer/main')),
-};
-
-const pages: PageComponentType = {
-  ...graphicsBasicsPages,
-  ...webGPUFeaturesPages,
-  ...graphicsDemoPages,
-  ...gpuComputeDemoPages,
-  ...webPlatformPages,
-  ...benchmarkPages,
-};
-
-export interface PageCategory {
-  title: string;
-  description?: string;
-  pages: PageComponentType;
-  sampleNames: string[];
-}
-
-const createPageCategory = (
-  title: string,
-  pages: PageComponentType,
-  description?: string
-): PageCategory => {
-  return {
-    title,
-    description,
-    pages,
-    sampleNames: Object.keys(pages),
-  };
-};
-
-export const pageCategories: PageCategory[] = [
-  createPageCategory(
-    'Basic Graphics',
-    graphicsBasicsPages,
-    'Basic rendering functionality implemented with the WebGPU API.'
-  ),
-  createPageCategory(
-    'WebGPU Features',
-    webGPUFeaturesPages,
-    'Highlights of important WebGPU features.'
-  ),
-  createPageCategory(
-    'GPGPU Demos',
-    gpuComputeDemoPages,
-    'Visualizations of parallel GPU compute operations.'
-  ),
-  createPageCategory(
-    'Graphics Techniques',
-    graphicsDemoPages,
-    'A collection of graphics techniques implemented with WebGPU.'
-  ),
-  createPageCategory(
-    'Web Platform Integration',
-    webPlatformPages,
-    'Demos integrating WebGPU with other functionalities of the web platform.'
-  ),
-  createPageCategory(
-    'Benchmarks',
-    benchmarkPages,
-    'WebGPU Performance Benchmarks'
-  ),
-];
-
-function Page({ slug }: Props): JSX.Element {
-  const PageComponent = pages[slug];
-  return  = async () => {
-  const paths = Object.keys(pages).map((p) => ({
-    params: { slug: p },
-  }));
-  return {
-    paths,
-    fallback: false,
-  };
-};
-
-export const getStaticProps: GetStaticProps = async ({
-  params,
-}) => {
-  if (!params) {
-    return { notFound: true };
-  }
-
-  return {
-    props: {
-      slug: params.slug,
-    },
-  };
-};
-
-export default Page;
diff --git a/src/pages/samples/videoUploadingWebCodecs.tsx b/src/pages/samples/videoUploadingWebCodecs.tsx
deleted file mode 100644
index 6e863b62..00000000
--- a/src/pages/samples/videoUploadingWebCodecs.tsx
+++ /dev/null
@@ -1,14 +0,0 @@
-import Head from 'next/head';
-
-function Page(): JSX.Element {
-  return (
-    
-      
-  // * maxStorableFragments: u32
-  // * targetWidth: u32
-  const uniformsSize = roundUp(
-    16 * Float32Array.BYTES_PER_ELEMENT + 2 * Uint32Array.BYTES_PER_ELEMENT,
-    16
-  );
-
-  const uniformBuffer = device.createBuffer({
-    size: uniformsSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-    label: 'uniformBuffer',
-  });
-
-  const opaqueModule = device.createShaderModule({
-    code: opaqueWGSL,
-    label: 'opaqueModule',
-  });
-
-  const opaquePipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: opaqueModule,
-      entryPoint: 'main_vs',
-      buffers: [
-        {
-          arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
-          attributes: [
-            {
-              // position
-              format: 'float32x3',
-              offset: 0,
-              shaderLocation: 0,
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: opaqueModule,
-      entryPoint: 'main_fs',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-    label: 'opaquePipeline',
-  });
-
-  const opaquePassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined,
-        clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: undefined,
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-    label: 'opaquePassDescriptor',
-  };
-
-  const opaqueBindGroup = device.createBindGroup({
-    layout: opaquePipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-          size: 16 * Float32Array.BYTES_PER_ELEMENT,
-          label: 'modelViewProjection',
-        },
-      },
-    ],
-    label: 'opaquePipeline',
-  });
-
-  const translucentModule = device.createShaderModule({
-    code: translucentWGSL,
-    label: 'translucentModule',
-  });
-
-  const translucentBindGroupLayout = device.createBindGroupLayout({
-    label: 'translucentBindGroupLayout',
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'storage',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'storage',
-        },
-      },
-      {
-        binding: 3,
-        visibility: GPUShaderStage.FRAGMENT,
-        texture: { sampleType: 'depth' },
-      },
-      {
-        binding: 4,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-          hasDynamicOffset: true,
-        },
-      },
-    ],
-  });
-
-  const translucentPipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [translucentBindGroupLayout],
-      label: 'translucentPipelineLayout',
-    }),
-    vertex: {
-      module: translucentModule,
-      entryPoint: 'main_vs',
-      buffers: [
-        {
-          arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT,
-          attributes: [
-            {
-              format: 'float32x3',
-              offset: 0,
-              shaderLocation: 0,
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: translucentModule,
-      entryPoint: 'main_fs',
-      targets: [
-        {
-          format: presentationFormat,
-          writeMask: 0x0,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-    label: 'translucentPipeline',
-  });
-
-  const translucentPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        loadOp: 'load',
-        storeOp: 'store',
-        view: undefined,
-      },
-    ],
-    label: 'translucentPassDescriptor',
-  };
-
-  const compositeModule = device.createShaderModule({
-    code: compositeWGSL,
-    label: 'compositeModule',
-  });
-
-  const compositeBindGroupLayout = device.createBindGroupLayout({
-    label: 'compositeBindGroupLayout',
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'storage',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'storage',
-        },
-      },
-      {
-        binding: 3,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-          hasDynamicOffset: true,
-        },
-      },
-    ],
-  });
-
-  const compositePipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [compositeBindGroupLayout],
-      label: 'compositePipelineLayout',
-    }),
-    vertex: {
-      module: compositeModule,
-      entryPoint: 'main_vs',
-    },
-    fragment: {
-      module: compositeModule,
-      entryPoint: 'main_fs',
-      targets: [
-        {
-          format: presentationFormat,
-          blend: {
-            color: {
-              srcFactor: 'one',
-              operation: 'add',
-              dstFactor: 'one-minus-src-alpha',
-            },
-            alpha: {},
-          },
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-    label: 'compositePipeline',
-  });
-
-  const compositePassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined,
-        loadOp: 'load',
-        storeOp: 'store',
-      },
-    ],
-    label: 'compositePassDescriptor',
-  };
-
-  const configure = () => {
-    let devicePixelRatio = window.devicePixelRatio;
-
-    // The default maximum storage buffer binding size is 128Mib. The amount
-    // of memory we need to store transparent fragments depends on the size
-    // of the canvas and the average number of layers per fragment we want to
-    // support. When the devicePixelRatio is 1, we know that 128Mib is enough
-    // to store 4 layers per pixel at 600x600. However, when the device pixel
-    // ratio is high enough we will exceed this limit.
-    //
-    // We provide 2 choices of mitigations to this issue:
-    // 1) Clamp the device pixel ratio to a value which we know will not break
-    //    the limit. The tradeoff here is that the canvas resolution will not
-    //    match the native resolution and therefore may have a reduction in
-    //    quality.
-    // 2) Break the frame into a series of horizontal slices using the scissor
-    //    functionality and process a single slice at a time. This limits memory
-    //    usage because we only need enough memory to process the dimensions
-    //    of the slice. The tradeoff is the performance reduction due to multiple
-    //    passes.
-    if (settings.memoryStrategy === 'clamp-pixel-ratio') {
-      devicePixelRatio = Math.min(window.devicePixelRatio, 3);
-    }
-
-    canvas.width = canvas.clientWidth * devicePixelRatio;
-    canvas.height = canvas.clientHeight * devicePixelRatio;
-
-    const depthTexture = device.createTexture({
-      size: [canvas.width, canvas.height],
-      format: 'depth24plus',
-      usage:
-        GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-      label: 'depthTexture',
-    });
-
-    const depthTextureView = depthTexture.createView({
-      label: 'depthTextureView',
-    });
-
-    // Determines how much memory is allocated to store linked-list elements
-    const averageLayersPerFragment = 4;
-
-    // Each element stores
-    // * color : vec4
-    // * depth : f32
-    // * index of next element in the list : u32
-    const linkedListElementSize =
-      5 * Float32Array.BYTES_PER_ELEMENT + 1 * Uint32Array.BYTES_PER_ELEMENT;
-
-    // We want to keep the linked-list buffer size under the maxStorageBufferBindingSize.
-    // Split the frame into enough slices to meet that constraint.
-    const bytesPerline =
-      canvas.width * averageLayersPerFragment * linkedListElementSize;
-    const maxLinesSupported = Math.floor(
-      device.limits.maxStorageBufferBindingSize / bytesPerline
-    );
-    const numSlices = Math.ceil(canvas.height / maxLinesSupported);
-    const sliceHeight = Math.ceil(canvas.height / numSlices);
-    const linkedListBufferSize = sliceHeight * bytesPerline;
-
-    const linkedListBuffer = device.createBuffer({
-      size: linkedListBufferSize,
-      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
-      label: 'linkedListBuffer',
-    });
-
-    // To slice up the frame we need to pass the starting fragment y position of the slice.
-    // We do this using a uniform buffer with a dynamic offset.
-    const sliceInfoBuffer = device.createBuffer({
-      size: numSlices * device.limits.minUniformBufferOffsetAlignment,
-      usage: GPUBufferUsage.UNIFORM,
-      mappedAtCreation: true,
-      label: 'sliceInfoBuffer',
-    });
-    {
-      const mapping = new Int32Array(sliceInfoBuffer.getMappedRange());
-
-      // This assumes minUniformBufferOffsetAlignment is a multiple of 4
-      const stride =
-        device.limits.minUniformBufferOffsetAlignment /
-        Int32Array.BYTES_PER_ELEMENT;
-      for (let i = 0; i < numSlices; ++i) {
-        mapping[i * stride] = i * sliceHeight;
-      }
-      sliceInfoBuffer.unmap();
-    }
-
-    // `Heads` struct contains the start index of the linked-list of translucent fragments
-    // for a given pixel.
-    // * numFragments : u32
-    // * data : array
-    const headsBuffer = device.createBuffer({
-      size: (1 + canvas.width * sliceHeight) * Uint32Array.BYTES_PER_ELEMENT,
-      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
-      label: 'headsBuffer',
-    });
-
-    const headsInitBuffer = device.createBuffer({
-      size: (1 + canvas.width * sliceHeight) * Uint32Array.BYTES_PER_ELEMENT,
-      usage: GPUBufferUsage.COPY_SRC,
-      mappedAtCreation: true,
-      label: 'headsInitBuffer',
-    });
-    {
-      const buffer = new Uint32Array(headsInitBuffer.getMappedRange());
-
-      for (let i = 0; i < buffer.length; ++i) {
-        buffer[i] = 0xffffffff;
-      }
-
-      headsInitBuffer.unmap();
-    }
-
-    const translucentBindGroup = device.createBindGroup({
-      layout: translucentBindGroupLayout,
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-            label: 'uniforms',
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: headsBuffer,
-            label: 'headsBuffer',
-          },
-        },
-        {
-          binding: 2,
-          resource: {
-            buffer: linkedListBuffer,
-            label: 'linkedListBuffer',
-          },
-        },
-        {
-          binding: 3,
-          resource: depthTextureView,
-        },
-        {
-          binding: 4,
-          resource: {
-            buffer: sliceInfoBuffer,
-            size: device.limits.minUniformBufferOffsetAlignment,
-            label: 'sliceInfoBuffer',
-          },
-        },
-      ],
-      label: 'translucentBindGroup',
-    });
-
-    const compositeBindGroup = device.createBindGroup({
-      layout: compositePipeline.getBindGroupLayout(0),
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-            label: 'uniforms',
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: headsBuffer,
-            label: 'headsBuffer',
-          },
-        },
-        {
-          binding: 2,
-          resource: {
-            buffer: linkedListBuffer,
-            label: 'linkedListBuffer',
-          },
-        },
-        {
-          binding: 3,
-          resource: {
-            buffer: sliceInfoBuffer,
-            size: device.limits.minUniformBufferOffsetAlignment,
-            label: 'sliceInfoBuffer',
-          },
-        },
-      ],
-    });
-
-    opaquePassDescriptor.depthStencilAttachment.view = depthTextureView;
-
-    // Rotates the camera around the origin based on time.
-    function getCameraViewProjMatrix() {
-      const aspect = canvas.width / canvas.height;
-
-      const projectionMatrix = mat4.perspective(
-        (2 * Math.PI) / 5,
-        aspect,
-        1,
-        2000.0
-      );
-
-      const upVector = vec3.fromValues(0, 1, 0);
-      const origin = vec3.fromValues(0, 0, 0);
-      const eyePosition = vec3.fromValues(0, 5, -100);
-
-      const rad = Math.PI * (Date.now() / 5000);
-      const rotation = mat4.rotateY(mat4.translation(origin), rad);
-      vec3.transformMat4(eyePosition, rotation, eyePosition);
-
-      const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
-
-      const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);
-      return viewProjMatrix as Float32Array;
-    }
-
-    return function doDraw() {
-      // update the uniform buffer
-      {
-        const buffer = new ArrayBuffer(uniformBuffer.size);
-
-        new Float32Array(buffer).set(getCameraViewProjMatrix());
-        new Uint32Array(buffer, 16 * Float32Array.BYTES_PER_ELEMENT).set([
-          averageLayersPerFragment * canvas.width * sliceHeight,
-          canvas.width,
-        ]);
-
-        device.queue.writeBuffer(uniformBuffer, 0, buffer);
-      }
-
-      const commandEncoder = device.createCommandEncoder();
-      const textureView = context.getCurrentTexture().createView();
-
-      // Draw the opaque objects
-      opaquePassDescriptor.colorAttachments[0].view = textureView;
-      const opaquePassEncoder =
-        commandEncoder.beginRenderPass(opaquePassDescriptor);
-      opaquePassEncoder.setPipeline(opaquePipeline);
-      opaquePassEncoder.setBindGroup(0, opaqueBindGroup);
-      opaquePassEncoder.setVertexBuffer(0, vertexBuffer);
-      opaquePassEncoder.setIndexBuffer(indexBuffer, 'uint16');
-      opaquePassEncoder.drawIndexed(mesh.triangles.length * 3, 8);
-      opaquePassEncoder.end();
-
-      for (let slice = 0; slice < numSlices; ++slice) {
-        // initialize the heads buffer
-        commandEncoder.copyBufferToBuffer(
-          headsInitBuffer,
-          0,
-          headsBuffer,
-          0,
-          headsInitBuffer.size
-        );
-
-        const scissorX = 0;
-        const scissorY = slice * sliceHeight;
-        const scissorWidth = canvas.width;
-        const scissorHeight =
-          Math.min((slice + 1) * sliceHeight, canvas.height) -
-          slice * sliceHeight;
-
-        // Draw the translucent objects
-        translucentPassDescriptor.colorAttachments[0].view = textureView;
-        const translucentPassEncoder = commandEncoder.beginRenderPass(
-          translucentPassDescriptor
-        );
-
-        // Set the scissor to only process a horizontal slice of the frame
-        translucentPassEncoder.setScissorRect(
-          scissorX,
-          scissorY,
-          scissorWidth,
-          scissorHeight
-        );
-
-        translucentPassEncoder.setPipeline(translucentPipeline);
-        translucentPassEncoder.setBindGroup(0, translucentBindGroup, [
-          slice * device.limits.minUniformBufferOffsetAlignment,
-        ]);
-        translucentPassEncoder.setVertexBuffer(0, vertexBuffer);
-        translucentPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
-        translucentPassEncoder.drawIndexed(mesh.triangles.length * 3, 8);
-        translucentPassEncoder.end();
-
-        // Composite the opaque and translucent objects
-        compositePassDescriptor.colorAttachments[0].view = textureView;
-        const compositePassEncoder = commandEncoder.beginRenderPass(
-          compositePassDescriptor
-        );
-
-        // Set the scissor to only process a horizontal slice of the frame
-        compositePassEncoder.setScissorRect(
-          scissorX,
-          scissorY,
-          scissorWidth,
-          scissorHeight
-        );
-
-        compositePassEncoder.setPipeline(compositePipeline);
-        compositePassEncoder.setBindGroup(0, compositeBindGroup, [
-          slice * device.limits.minUniformBufferOffsetAlignment,
-        ]);
-        compositePassEncoder.draw(6);
-        compositePassEncoder.end();
-      }
-
-      device.queue.submit([commandEncoder.finish()]);
-    };
-  };
-
-  let doDraw = configure();
-
-  const updateSettings = () => {
-    doDraw = configure();
-  };
-
-  gui
-    .add(settings, 'memoryStrategy', ['multipass', 'clamp-pixel-ratio'])
-    .onFinishChange(updateSettings);
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    doDraw();
-
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-const ABuffer: () => JSX.Element = () =>
-  makeSample({
-    name: 'A-Buffer',
-    description: `Demonstrates order independent transparency using a per-pixel 
-       linked-list of translucent fragments. Provides a choice for 
-       limiting memory usage (when required).`,
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: 'opaque.wgsl',
-        contents: opaqueWGSL,
-      },
-      {
-        name: 'translucent.wgsl',
-        contents: translucentWGSL,
-      },
-      {
-        name: 'composite.wgsl',
-        contents: compositeWGSL,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ABuffer;
diff --git a/src/sample/animometer/main.ts b/src/sample/animometer/main.ts
deleted file mode 100644
index f8772358..00000000
--- a/src/sample/animometer/main.ts
+++ /dev/null
@@ -1,404 +0,0 @@
-import { assert, makeSample, SampleInit } from '../../components/SampleLayout';
-
-import animometerWGSL from './animometer.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  assert(adapter, 'requestAdapter returned null');
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-
-  const perfDisplayContainer = document.createElement('div');
-  perfDisplayContainer.style.color = 'white';
-  perfDisplayContainer.style.background = 'black';
-  perfDisplayContainer.style.position = 'absolute';
-  perfDisplayContainer.style.top = '10px';
-  perfDisplayContainer.style.left = '10px';
-
-  const perfDisplay = document.createElement('pre');
-  perfDisplayContainer.appendChild(perfDisplay);
-  if (canvas.parentNode) {
-    canvas.parentNode.appendChild(perfDisplayContainer);
-  } else {
-    console.error('canvas.parentNode is null');
-  }
-
-  const params = new URLSearchParams(window.location.search);
-  const settings = {
-    numTriangles: Number(params.get('numTriangles')) || 20000,
-    renderBundles: Boolean(params.get('renderBundles')),
-    dynamicOffsets: Boolean(params.get('dynamicOffsets')),
-  };
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-    usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const timeBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-          minBindingSize: 4,
-        },
-      },
-    ],
-  });
-
-  const bindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-          minBindingSize: 20,
-        },
-      },
-    ],
-  });
-
-  const dynamicBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-          hasDynamicOffset: true,
-          minBindingSize: 20,
-        },
-      },
-    ],
-  });
-
-  const vec4Size = 4 * Float32Array.BYTES_PER_ELEMENT;
-  const pipelineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [timeBindGroupLayout, bindGroupLayout],
-  });
-  const dynamicPipelineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [timeBindGroupLayout, dynamicBindGroupLayout],
-  });
-
-  const shaderModule = device.createShaderModule({
-    code: animometerWGSL,
-  });
-  const pipelineDesc: GPURenderPipelineDescriptor = {
-    layout: 'auto',
-    vertex: {
-      module: shaderModule,
-      entryPoint: 'vert_main',
-      buffers: [
-        {
-          // vertex buffer
-          arrayStride: 2 * vec4Size,
-          stepMode: 'vertex',
-          attributes: [
-            {
-              // vertex positions
-              shaderLocation: 0,
-              offset: 0,
-              format: 'float32x4',
-            },
-            {
-              // vertex colors
-              shaderLocation: 1,
-              offset: vec4Size,
-              format: 'float32x4',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: shaderModule,
-      entryPoint: 'frag_main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-      frontFace: 'ccw',
-      cullMode: 'none',
-    },
-  };
-
-  const pipeline = device.createRenderPipeline({
-    ...pipelineDesc,
-    layout: pipelineLayout,
-  });
-
-  const dynamicPipeline = device.createRenderPipeline({
-    ...pipelineDesc,
-    layout: dynamicPipelineLayout,
-  });
-
-  const vertexBuffer = device.createBuffer({
-    size: 2 * 3 * vec4Size,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-
-  // prettier-ignore
-  new Float32Array(vertexBuffer.getMappedRange()).set([
-    // position data  /**/ color data
-    0, 0.1, 0, 1,     /**/ 1, 0, 0, 1,
-    -0.1, -0.1, 0, 1, /**/ 0, 1, 0, 1,
-    0.1, -0.1, 0, 1,  /**/ 0, 0, 1, 1,
-  ]);
-  vertexBuffer.unmap();
-
-  function configure() {
-    const numTriangles = settings.numTriangles;
-    const uniformBytes = 5 * Float32Array.BYTES_PER_ELEMENT;
-    const alignedUniformBytes = Math.ceil(uniformBytes / 256) * 256;
-    const alignedUniformFloats =
-      alignedUniformBytes / Float32Array.BYTES_PER_ELEMENT;
-    const uniformBuffer = device.createBuffer({
-      size: numTriangles * alignedUniformBytes + Float32Array.BYTES_PER_ELEMENT,
-      usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
-    });
-    const uniformBufferData = new Float32Array(
-      numTriangles * alignedUniformFloats
-    );
-    const bindGroups = new Array(numTriangles);
-    for (let i = 0; i < numTriangles; ++i) {
-      uniformBufferData[alignedUniformFloats * i + 0] =
-        Math.random() * 0.2 + 0.2; // scale
-      uniformBufferData[alignedUniformFloats * i + 1] =
-        0.9 * 2 * (Math.random() - 0.5); // offsetX
-      uniformBufferData[alignedUniformFloats * i + 2] =
-        0.9 * 2 * (Math.random() - 0.5); // offsetY
-      uniformBufferData[alignedUniformFloats * i + 3] =
-        Math.random() * 1.5 + 0.5; // scalar
-      uniformBufferData[alignedUniformFloats * i + 4] = Math.random() * 10; // scalarOffset
-
-      bindGroups[i] = device.createBindGroup({
-        layout: bindGroupLayout,
-        entries: [
-          {
-            binding: 0,
-            resource: {
-              buffer: uniformBuffer,
-              offset: i * alignedUniformBytes,
-              size: 6 * Float32Array.BYTES_PER_ELEMENT,
-            },
-          },
-        ],
-      });
-    }
-
-    const dynamicBindGroup = device.createBindGroup({
-      layout: dynamicBindGroupLayout,
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-            offset: 0,
-            size: 6 * Float32Array.BYTES_PER_ELEMENT,
-          },
-        },
-      ],
-    });
-
-    const timeOffset = numTriangles * alignedUniformBytes;
-    const timeBindGroup = device.createBindGroup({
-      layout: timeBindGroupLayout,
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-            offset: timeOffset,
-            size: Float32Array.BYTES_PER_ELEMENT,
-          },
-        },
-      ],
-    });
-
-    // writeBuffer too large may OOM. TODO: The browser should internally chunk uploads.
-    const maxMappingLength =
-      (14 * 1024 * 1024) / Float32Array.BYTES_PER_ELEMENT;
-    for (
-      let offset = 0;
-      offset < uniformBufferData.length;
-      offset += maxMappingLength
-    ) {
-      const uploadCount = Math.min(
-        uniformBufferData.length - offset,
-        maxMappingLength
-      );
-
-      device.queue.writeBuffer(
-        uniformBuffer,
-        offset * Float32Array.BYTES_PER_ELEMENT,
-        uniformBufferData.buffer,
-        uniformBufferData.byteOffset + offset * Float32Array.BYTES_PER_ELEMENT,
-        uploadCount * Float32Array.BYTES_PER_ELEMENT
-      );
-    }
-
-    function recordRenderPass(
-      passEncoder: GPURenderBundleEncoder | GPURenderPassEncoder
-    ) {
-      if (settings.dynamicOffsets) {
-        passEncoder.setPipeline(dynamicPipeline);
-      } else {
-        passEncoder.setPipeline(pipeline);
-      }
-      passEncoder.setVertexBuffer(0, vertexBuffer);
-      passEncoder.setBindGroup(0, timeBindGroup);
-      const dynamicOffsets = [0];
-      for (let i = 0; i < numTriangles; ++i) {
-        if (settings.dynamicOffsets) {
-          dynamicOffsets[0] = i * alignedUniformBytes;
-          passEncoder.setBindGroup(1, dynamicBindGroup, dynamicOffsets);
-        } else {
-          passEncoder.setBindGroup(1, bindGroups[i]);
-        }
-        passEncoder.draw(3);
-      }
-    }
-
-    let startTime: number | undefined = undefined;
-    const uniformTime = new Float32Array([0]);
-
-    const renderPassDescriptor = {
-      colorAttachments: [
-        {
-          view: undefined as GPUTextureView, // Assigned later
-          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-          loadOp: 'clear' as const,
-          storeOp: 'store' as const,
-        },
-      ],
-    };
-
-    const renderBundleEncoder = device.createRenderBundleEncoder({
-      colorFormats: [presentationFormat],
-    });
-    recordRenderPass(renderBundleEncoder);
-    const renderBundle = renderBundleEncoder.finish();
-
-    return function doDraw(timestamp: number) {
-      if (startTime === undefined) {
-        startTime = timestamp;
-      }
-      uniformTime[0] = (timestamp - startTime) / 1000;
-      device.queue.writeBuffer(uniformBuffer, timeOffset, uniformTime.buffer);
-
-      renderPassDescriptor.colorAttachments[0].view = context
-        .getCurrentTexture()
-        .createView();
-
-      const commandEncoder = device.createCommandEncoder();
-      const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-
-      if (settings.renderBundles) {
-        passEncoder.executeBundles([renderBundle]);
-      } else {
-        recordRenderPass(passEncoder);
-      }
-
-      passEncoder.end();
-      device.queue.submit([commandEncoder.finish()]);
-    };
-  }
-
-  let doDraw = configure();
-
-  const updateSettings = () => {
-    doDraw = configure();
-  };
-  if (gui === undefined) {
-    console.error('GUI not initialized');
-  } else {
-    gui
-      .add(settings, 'numTriangles', 0, 200000)
-      .step(1)
-      .onFinishChange(updateSettings);
-    gui.add(settings, 'renderBundles');
-    gui.add(settings, 'dynamicOffsets');
-  }
-
-  let previousFrameTimestamp: number | undefined = undefined;
-  let jsTimeAvg: number | undefined = undefined;
-  let frameTimeAvg: number | undefined = undefined;
-  let updateDisplay = true;
-
-  function frame(timestamp: number) {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    let frameTime = 0;
-    if (previousFrameTimestamp !== undefined) {
-      frameTime = timestamp - previousFrameTimestamp;
-    }
-    previousFrameTimestamp = timestamp;
-
-    const start = performance.now();
-    doDraw(timestamp);
-    const jsTime = performance.now() - start;
-    if (frameTimeAvg === undefined) {
-      frameTimeAvg = frameTime;
-    }
-    if (jsTimeAvg === undefined) {
-      jsTimeAvg = jsTime;
-    }
-
-    const w = 0.2;
-    frameTimeAvg = (1 - w) * frameTimeAvg + w * frameTime;
-    jsTimeAvg = (1 - w) * jsTimeAvg + w * jsTime;
-
-    if (updateDisplay) {
-      perfDisplay.innerHTML = `Avg Javascript: ${jsTimeAvg.toFixed(
-        2
-      )} ms\nAvg Frame: ${frameTimeAvg.toFixed(2)} ms`;
-      updateDisplay = false;
-      setTimeout(() => {
-        updateDisplay = true;
-      }, 100);
-    }
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const Animometer: () => JSX.Element = () =>
-  makeSample({
-    name: 'Animometer',
-    description: 'A WebGPU port of the Animometer MotionMark benchmark.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './animometer.wgsl',
-        contents: animometerWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default Animometer;
diff --git a/src/sample/cameras/main.ts b/src/sample/cameras/main.ts
deleted file mode 100644
index 861ab27f..00000000
--- a/src/sample/cameras/main.ts
+++ /dev/null
@@ -1,275 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-import cubeWGSL from './cube.wgsl';
-import { ArcballCamera, WASDCamera, cameraSourceInfo } from './camera';
-import { createInputHandler, inputSourceInfo } from './input';
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  if (!pageState.active) {
-    return;
-  }
-
-  // The input handler
-  const inputHandler = createInputHandler(window, canvas);
-
-  // The camera types
-  const initialCameraPosition = vec3.create(3, 2, 5);
-  const cameras = {
-    arcball: new ArcballCamera({ position: initialCameraPosition }),
-    WASD: new WASDCamera({ position: initialCameraPosition }),
-  };
-
-  // GUI parameters
-  const params: { type: 'arcball' | 'WASD' } = {
-    type: 'arcball',
-  };
-
-  // Callback handler for camera mode
-  let oldCameraType = params.type;
-  gui.add(params, 'type', ['arcball', 'WASD']).onChange(() => {
-    // Copy the camera matrix from old to new
-    const newCameraType = params.type;
-    cameras[newCameraType].matrix = cameras[oldCameraType].matrix;
-    oldCameraType = newCameraType;
-  });
-
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: cubeWGSL,
-      }),
-      entryPoint: 'vertex_main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: cubeWGSL,
-      }),
-      entryPoint: 'fragment_main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-      cullMode: 'back',
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  // Fetch the image and upload it into a GPUTexture.
-  let cubeTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/Di-3d.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-
-    cubeTexture = device.createTexture({
-      size: [imageBitmap.width, imageBitmap.height, 1],
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-    device.queue.copyExternalImageToTexture(
-      { source: imageBitmap },
-      { texture: cubeTexture },
-      [imageBitmap.width, imageBitmap.height]
-    );
-  }
-
-  // Create a sampler with linear filtering for smooth interpolation.
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: sampler,
-      },
-      {
-        binding: 2,
-        resource: cubeTexture.createView(),
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-  const modelViewProjectionMatrix = mat4.create();
-
-  function getModelViewProjectionMatrix(deltaTime: number) {
-    const camera = cameras[params.type];
-    const viewMatrix = camera.update(deltaTime, inputHandler());
-    mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
-    return modelViewProjectionMatrix as Float32Array;
-  }
-
-  let lastFrameMS = Date.now();
-
-  function frame() {
-    const now = Date.now();
-    const deltaTime = (now - lastFrameMS) / 1000;
-    lastFrameMS = now;
-
-    if (!pageState.active) {
-      // Sample is no longer the active page.
-      return;
-    }
-
-    const modelViewProjection = getModelViewProjectionMatrix(deltaTime);
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      modelViewProjection.buffer,
-      modelViewProjection.byteOffset,
-      modelViewProjection.byteLength
-    );
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.draw(cubeVertexCount);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const Cameras: () => JSX.Element = () =>
-  makeSample({
-    name: 'Cameras',
-    description: 'This example provides example camera implementations',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      cameraSourceInfo,
-      inputSourceInfo,
-      {
-        name: '../../shaders/cube.wgsl',
-        contents: cubeWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default Cameras;
diff --git a/src/sample/computeBoids/main.ts b/src/sample/computeBoids/main.ts
deleted file mode 100644
index a95ab8ec..00000000
--- a/src/sample/computeBoids/main.ts
+++ /dev/null
@@ -1,384 +0,0 @@
-import { assert, makeSample, SampleInit } from '../../components/SampleLayout';
-
-import spriteWGSL from './sprite.wgsl';
-import updateSpritesWGSL from './updateSprites.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  assert(adapter, 'requestAdapter returned null');
-
-  const hasTimestampQuery = adapter.features.has('timestamp-query');
-  const device = await adapter.requestDevice({
-    requiredFeatures: hasTimestampQuery ? ['timestamp-query'] : [],
-  });
-
-  const perfDisplayContainer = document.createElement('div');
-  perfDisplayContainer.style.color = 'white';
-  perfDisplayContainer.style.backdropFilter = 'blur(10px)';
-  perfDisplayContainer.style.position = 'absolute';
-  perfDisplayContainer.style.bottom = '10px';
-  perfDisplayContainer.style.left = '10px';
-  perfDisplayContainer.style.textAlign = 'left';
-
-  const perfDisplay = document.createElement('pre');
-  perfDisplay.style.margin = '.5em';
-  perfDisplayContainer.appendChild(perfDisplay);
-  if (canvas.parentNode) {
-    canvas.parentNode.appendChild(perfDisplayContainer);
-  } else {
-    console.error('canvas.parentNode is null');
-  }
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const spriteShaderModule = device.createShaderModule({ code: spriteWGSL });
-  const renderPipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: spriteShaderModule,
-      entryPoint: 'vert_main',
-      buffers: [
-        {
-          // instanced particles buffer
-          arrayStride: 4 * 4,
-          stepMode: 'instance',
-          attributes: [
-            {
-              // instance position
-              shaderLocation: 0,
-              offset: 0,
-              format: 'float32x2',
-            },
-            {
-              // instance velocity
-              shaderLocation: 1,
-              offset: 2 * 4,
-              format: 'float32x2',
-            },
-          ],
-        },
-        {
-          // vertex buffer
-          arrayStride: 2 * 4,
-          stepMode: 'vertex',
-          attributes: [
-            {
-              // vertex positions
-              shaderLocation: 2,
-              offset: 0,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: spriteShaderModule,
-      entryPoint: 'frag_main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-  });
-
-  const computePipeline = device.createComputePipeline({
-    layout: 'auto',
-    compute: {
-      module: device.createShaderModule({
-        code: updateSpritesWGSL,
-      }),
-      entryPoint: 'main',
-    },
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined as GPUTextureView, // Assigned later
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear' as const,
-        storeOp: 'store' as const,
-      },
-    ],
-  };
-
-  const computePassDescriptor: GPUComputePassDescriptor = {};
-
-  /** Storage for timestamp query results */
-  let querySet: GPUQuerySet | undefined = undefined;
-  /** Timestamps are resolved into this buffer */
-  let resolveBuffer: GPUBuffer | undefined = undefined;
-  /** Pool of spare buffers for MAP_READing the timestamps back to CPU. A buffer
-   * is taken from the pool (if available) when a readback is needed, and placed
-   * back into the pool once the readback is done and it's unmapped. */
-  const spareResultBuffers = [];
-
-  if (hasTimestampQuery) {
-    querySet = device.createQuerySet({
-      type: 'timestamp',
-      count: 4,
-    });
-    resolveBuffer = device.createBuffer({
-      size: 4 * BigInt64Array.BYTES_PER_ELEMENT,
-      usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC,
-    });
-    computePassDescriptor.timestampWrites = {
-      querySet,
-      beginningOfPassWriteIndex: 0,
-      endOfPassWriteIndex: 1,
-    };
-    renderPassDescriptor.timestampWrites = {
-      querySet,
-      beginningOfPassWriteIndex: 2,
-      endOfPassWriteIndex: 3,
-    };
-  }
-
-  // prettier-ignore
-  const vertexBufferData = new Float32Array([
-    -0.01, -0.02, 0.01,
-    -0.02, 0.0, 0.02,
-  ]);
-
-  const spriteVertexBuffer = device.createBuffer({
-    size: vertexBufferData.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(spriteVertexBuffer.getMappedRange()).set(vertexBufferData);
-  spriteVertexBuffer.unmap();
-
-  const simParams = {
-    deltaT: 0.04,
-    rule1Distance: 0.1,
-    rule2Distance: 0.025,
-    rule3Distance: 0.025,
-    rule1Scale: 0.02,
-    rule2Scale: 0.05,
-    rule3Scale: 0.005,
-  };
-
-  const simParamBufferSize = 7 * Float32Array.BYTES_PER_ELEMENT;
-  const simParamBuffer = device.createBuffer({
-    size: simParamBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  function updateSimParams() {
-    device.queue.writeBuffer(
-      simParamBuffer,
-      0,
-      new Float32Array([
-        simParams.deltaT,
-        simParams.rule1Distance,
-        simParams.rule2Distance,
-        simParams.rule3Distance,
-        simParams.rule1Scale,
-        simParams.rule2Scale,
-        simParams.rule3Scale,
-      ])
-    );
-  }
-
-  updateSimParams();
-  Object.keys(simParams).forEach((k) => {
-    const key = k as keyof typeof simParams;
-    if (gui === undefined) {
-      console.error('GUI not initialized');
-    } else {
-      gui.add(simParams, key).onFinishChange(updateSimParams);
-    }
-  });
-
-  const numParticles = 1500;
-  const initialParticleData = new Float32Array(numParticles * 4);
-  for (let i = 0; i < numParticles; ++i) {
-    initialParticleData[4 * i + 0] = 2 * (Math.random() - 0.5);
-    initialParticleData[4 * i + 1] = 2 * (Math.random() - 0.5);
-    initialParticleData[4 * i + 2] = 2 * (Math.random() - 0.5) * 0.1;
-    initialParticleData[4 * i + 3] = 2 * (Math.random() - 0.5) * 0.1;
-  }
-
-  const particleBuffers: GPUBuffer[] = new Array(2);
-  const particleBindGroups: GPUBindGroup[] = new Array(2);
-  for (let i = 0; i < 2; ++i) {
-    particleBuffers[i] = device.createBuffer({
-      size: initialParticleData.byteLength,
-      usage: GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE,
-      mappedAtCreation: true,
-    });
-    new Float32Array(particleBuffers[i].getMappedRange()).set(
-      initialParticleData
-    );
-    particleBuffers[i].unmap();
-  }
-
-  for (let i = 0; i < 2; ++i) {
-    particleBindGroups[i] = device.createBindGroup({
-      layout: computePipeline.getBindGroupLayout(0),
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: simParamBuffer,
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: particleBuffers[i],
-            offset: 0,
-            size: initialParticleData.byteLength,
-          },
-        },
-        {
-          binding: 2,
-          resource: {
-            buffer: particleBuffers[(i + 1) % 2],
-            offset: 0,
-            size: initialParticleData.byteLength,
-          },
-        },
-      ],
-    });
-  }
-
-  let t = 0;
-  let computePassDurationSum = 0;
-  let renderPassDurationSum = 0;
-  let timerSamples = 0;
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    {
-      const passEncoder = commandEncoder.beginComputePass(
-        computePassDescriptor
-      );
-      passEncoder.setPipeline(computePipeline);
-      passEncoder.setBindGroup(0, particleBindGroups[t % 2]);
-      passEncoder.dispatchWorkgroups(Math.ceil(numParticles / 64));
-      passEncoder.end();
-    }
-    {
-      const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-      passEncoder.setPipeline(renderPipeline);
-      passEncoder.setVertexBuffer(0, particleBuffers[(t + 1) % 2]);
-      passEncoder.setVertexBuffer(1, spriteVertexBuffer);
-      passEncoder.draw(3, numParticles, 0, 0);
-      passEncoder.end();
-    }
-
-    let resultBuffer: GPUBuffer | undefined = undefined;
-    if (hasTimestampQuery) {
-      resultBuffer =
-        spareResultBuffers.pop() ||
-        device.createBuffer({
-          size: 4 * BigInt64Array.BYTES_PER_ELEMENT,
-          usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
-        });
-      commandEncoder.resolveQuerySet(querySet, 0, 4, resolveBuffer, 0);
-      commandEncoder.copyBufferToBuffer(
-        resolveBuffer,
-        0,
-        resultBuffer,
-        0,
-        resultBuffer.size
-      );
-    }
-
-    device.queue.submit([commandEncoder.finish()]);
-
-    if (hasTimestampQuery) {
-      resultBuffer.mapAsync(GPUMapMode.READ).then(() => {
-        const times = new BigInt64Array(resultBuffer.getMappedRange());
-        const computePassDuration = Number(times[1] - times[0]);
-        const renderPassDuration = Number(times[3] - times[2]);
-
-        // In some cases the timestamps may wrap around and produce a negative
-        // number as the GPU resets it's timings. These can safely be ignored.
-        if (computePassDuration > 0 && renderPassDuration > 0) {
-          computePassDurationSum += computePassDuration;
-          renderPassDurationSum += renderPassDuration;
-          timerSamples++;
-        }
-        resultBuffer.unmap();
-
-        // Periodically update the text for the timer stats
-        const kNumTimerSamplesPerUpdate = 100;
-        if (timerSamples >= kNumTimerSamplesPerUpdate) {
-          const avgComputeMicroseconds = Math.round(
-            computePassDurationSum / timerSamples / 1000
-          );
-          const avgRenderMicroseconds = Math.round(
-            renderPassDurationSum / timerSamples / 1000
-          );
-          perfDisplay.textContent = `\
-avg compute pass duration: ${avgComputeMicroseconds}µs
-avg render pass duration:  ${avgRenderMicroseconds}µs
-spare readback buffers:    ${spareResultBuffers.length}`;
-          computePassDurationSum = 0;
-          renderPassDurationSum = 0;
-          timerSamples = 0;
-        }
-        spareResultBuffers.push(resultBuffer);
-      });
-    }
-
-    ++t;
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const ComputeBoids: () => JSX.Element = () =>
-  makeSample({
-    name: 'Compute Boids',
-    description:
-      'A GPU compute particle simulation that mimics \
-the flocking behavior of birds. A compute shader updates \
-two ping-pong buffers which store particle data. The data \
-is used to draw instanced particles.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: 'updateSprites.wgsl',
-        contents: updateSpritesWGSL,
-        editable: true,
-      },
-      {
-        name: 'sprite.wgsl',
-        contents: spriteWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ComputeBoids;
diff --git a/src/sample/cornell/main.ts b/src/sample/cornell/main.ts
deleted file mode 100644
index 40a9a995..00000000
--- a/src/sample/cornell/main.ts
+++ /dev/null
@@ -1,165 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import radiosityWGSL from './radiosity.wgsl';
-import rasterizerWGSL from './rasterizer.wgsl';
-import raytracerWGSL from './raytracer.wgsl';
-import tonemapperWGSL from './tonemapper.wgsl';
-import commonWGSL from './common.wgsl';
-import Scene from './scene';
-import Common from './common';
-import Radiosity from './radiosity';
-import Rasterizer from './rasterizer';
-import Tonemapper from './tonemapper';
-import Raytracer from './raytracer';
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-  const requiredFeatures: GPUFeatureName[] =
-    presentationFormat === 'bgra8unorm' ? ['bgra8unorm-storage'] : [];
-  const adapter = await navigator.gpu.requestAdapter();
-  for (const feature of requiredFeatures) {
-    if (!adapter.features.has(feature)) {
-      throw new Error(
-        `sample requires ${feature}, but is not supported by the adapter`
-      );
-    }
-  }
-  const device = await adapter.requestDevice({ requiredFeatures });
-
-  if (!pageState.active) return;
-
-  const params: {
-    renderer: 'rasterizer' | 'raytracer';
-    rotateCamera: boolean;
-  } = {
-    renderer: 'rasterizer',
-    rotateCamera: true,
-  };
-
-  gui.add(params, 'renderer', ['rasterizer', 'raytracer']);
-  gui.add(params, 'rotateCamera', true);
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-  context.configure({
-    device,
-    format: presentationFormat,
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.STORAGE_BINDING,
-    alphaMode: 'premultiplied',
-  });
-
-  const framebuffer = device.createTexture({
-    label: 'framebuffer',
-    size: [canvas.width, canvas.height],
-    format: 'rgba16float',
-    usage:
-      GPUTextureUsage.RENDER_ATTACHMENT |
-      GPUTextureUsage.STORAGE_BINDING |
-      GPUTextureUsage.TEXTURE_BINDING,
-  });
-
-  const scene = new Scene(device);
-  const common = new Common(device, scene.quadBuffer);
-  const radiosity = new Radiosity(device, common, scene);
-  const rasterizer = new Rasterizer(
-    device,
-    common,
-    scene,
-    radiosity,
-    framebuffer
-  );
-  const raytracer = new Raytracer(device, common, radiosity, framebuffer);
-
-  function frame() {
-    if (!pageState.active) {
-      // Sample is no longer the active page.
-      return;
-    }
-
-    const canvasTexture = context.getCurrentTexture();
-    const commandEncoder = device.createCommandEncoder();
-
-    common.update({
-      rotateCamera: params.rotateCamera,
-      aspect: canvas.width / canvas.height,
-    });
-    radiosity.run(commandEncoder);
-
-    switch (params.renderer) {
-      case 'rasterizer': {
-        rasterizer.run(commandEncoder);
-        break;
-      }
-      case 'raytracer': {
-        raytracer.run(commandEncoder);
-        break;
-      }
-    }
-
-    const tonemapper = new Tonemapper(
-      device,
-      common,
-      framebuffer,
-      canvasTexture
-    );
-    tonemapper.run(commandEncoder);
-
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-const CornellBox: () => JSX.Element = () =>
-  makeSample({
-    name: 'Cornell box',
-    description:
-      'A classic Cornell box, using a lightmap generated using software ray-tracing.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      Common.sourceInfo,
-      Scene.sourceInfo,
-      Radiosity.sourceInfo,
-      Rasterizer.sourceInfo,
-      Raytracer.sourceInfo,
-      Tonemapper.sourceInfo,
-      {
-        name: './radiosity.wgsl',
-        contents: radiosityWGSL,
-        editable: true,
-      },
-      {
-        name: './rasterizer.wgsl',
-        contents: rasterizerWGSL,
-        editable: true,
-      },
-      {
-        name: './raytracer.wgsl',
-        contents: raytracerWGSL,
-        editable: true,
-      },
-      {
-        name: './tonemapper.wgsl',
-        contents: tonemapperWGSL,
-        editable: true,
-      },
-      {
-        name: './common.wgsl',
-        contents: commonWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default CornellBox;
diff --git a/src/sample/cubemap/main.ts b/src/sample/cubemap/main.ts
deleted file mode 100644
index d6513c54..00000000
--- a/src/sample/cubemap/main.ts
+++ /dev/null
@@ -1,288 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import basicVertWGSL from '../../shaders/basic.vert.wgsl';
-import sampleCubemapWGSL from './sampleCubemap.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: basicVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: sampleCubemapWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Since we are seeing from inside of the cube
-      // and we are using the regular cube geomtry data with outward-facing normals,
-      // the cullMode should be 'front' or 'none'.
-      cullMode: 'none',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  // Fetch the 6 separate images for negative/positive x, y, z axis of a cubemap
-  // and upload it into a GPUTexture.
-  let cubemapTexture: GPUTexture;
-  {
-    // The order of the array layers is [+X, -X, +Y, -Y, +Z, -Z]
-    const imgSrcs = [
-      '../assets/img/cubemap/posx.jpg',
-      '../assets/img/cubemap/negx.jpg',
-      '../assets/img/cubemap/posy.jpg',
-      '../assets/img/cubemap/negy.jpg',
-      '../assets/img/cubemap/posz.jpg',
-      '../assets/img/cubemap/negz.jpg',
-    ];
-    const promises = imgSrcs.map(async (src) => {
-      const response = await fetch(src);
-      return createImageBitmap(await response.blob());
-    });
-    const imageBitmaps = await Promise.all(promises);
-
-    cubemapTexture = device.createTexture({
-      dimension: '2d',
-      // Create a 2d array texture.
-      // Assume each image has the same size.
-      size: [imageBitmaps[0].width, imageBitmaps[0].height, 6],
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-
-    for (let i = 0; i < imageBitmaps.length; i++) {
-      const imageBitmap = imageBitmaps[i];
-      device.queue.copyExternalImageToTexture(
-        { source: imageBitmap },
-        { texture: cubemapTexture, origin: [0, 0, i] },
-        [imageBitmap.width, imageBitmap.height]
-      );
-    }
-  }
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-          offset: 0,
-          size: uniformBufferSize,
-        },
-      },
-      {
-        binding: 1,
-        resource: sampler,
-      },
-      {
-        binding: 2,
-        resource: cubemapTexture.createView({
-          dimension: 'cube',
-        }),
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 3000);
-
-  const modelMatrix = mat4.scaling(vec3.fromValues(1000, 1000, 1000));
-  const modelViewProjectionMatrix = mat4.create() as Float32Array;
-  const viewMatrix = mat4.identity();
-
-  const tmpMat4 = mat4.create();
-
-  // Comppute camera movement:
-  // It rotates around Y axis with a slight pitch movement.
-  function updateTransformationMatrix() {
-    const now = Date.now() / 800;
-
-    mat4.rotate(
-      viewMatrix,
-      vec3.fromValues(1, 0, 0),
-      (Math.PI / 10) * Math.sin(now),
-      tmpMat4
-    );
-    mat4.rotate(tmpMat4, vec3.fromValues(0, 1, 0), now * 0.2, tmpMat4);
-
-    mat4.multiply(tmpMat4, modelMatrix, modelViewProjectionMatrix);
-    mat4.multiply(
-      projectionMatrix,
-      modelViewProjectionMatrix,
-      modelViewProjectionMatrix
-    );
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    updateTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      modelViewProjectionMatrix.buffer,
-      modelViewProjectionMatrix.byteOffset,
-      modelViewProjectionMatrix.byteLength
-    );
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.draw(cubeVertexCount);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const CubemapCubes: () => JSX.Element = () =>
-  makeSample({
-    name: 'Cubemap',
-    description:
-      'This example shows how to render and sample from a cubemap texture.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        contents: basicVertWGSL,
-        editable: true,
-      },
-      {
-        name: './sampleCubemap.frag.wgsl',
-        contents: sampleCubemapWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default CubemapCubes;
diff --git a/src/sample/deferredRendering/main.ts b/src/sample/deferredRendering/main.ts
deleted file mode 100644
index 35a3d265..00000000
--- a/src/sample/deferredRendering/main.ts
+++ /dev/null
@@ -1,656 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import { mat4, vec3, vec4 } from 'wgpu-matrix';
-import { mesh } from '../../meshes/stanfordDragon';
-
-import lightUpdate from './lightUpdate.wgsl';
-import vertexWriteGBuffers from './vertexWriteGBuffers.wgsl';
-import fragmentWriteGBuffers from './fragmentWriteGBuffers.wgsl';
-import vertexTextureQuad from './vertexTextureQuad.wgsl';
-import fragmentGBuffersDebugView from './fragmentGBuffersDebugView.wgsl';
-import fragmentDeferredRendering from './fragmentDeferredRendering.wgsl';
-
-const kMaxNumLights = 1024;
-const lightExtentMin = vec3.fromValues(-50, -30, -50);
-const lightExtentMax = vec3.fromValues(50, 50, 50);
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const aspect = canvas.width / canvas.height;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create the model vertex buffer.
-  const kVertexStride = 8;
-  const vertexBuffer = device.createBuffer({
-    // position: vec3, normal: vec3, uv: vec2
-    size:
-      mesh.positions.length * kVertexStride * Float32Array.BYTES_PER_ELEMENT,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  {
-    const mapping = new Float32Array(vertexBuffer.getMappedRange());
-    for (let i = 0; i < mesh.positions.length; ++i) {
-      mapping.set(mesh.positions[i], kVertexStride * i);
-      mapping.set(mesh.normals[i], kVertexStride * i + 3);
-      mapping.set(mesh.uvs[i], kVertexStride * i + 6);
-    }
-    vertexBuffer.unmap();
-  }
-
-  // Create the model index buffer.
-  const indexCount = mesh.triangles.length * 3;
-  const indexBuffer = device.createBuffer({
-    size: indexCount * Uint16Array.BYTES_PER_ELEMENT,
-    usage: GPUBufferUsage.INDEX,
-    mappedAtCreation: true,
-  });
-  {
-    const mapping = new Uint16Array(indexBuffer.getMappedRange());
-    for (let i = 0; i < mesh.triangles.length; ++i) {
-      mapping.set(mesh.triangles[i], 3 * i);
-    }
-    indexBuffer.unmap();
-  }
-
-  // GBuffer texture render targets
-  const gBufferTexture2DFloat16 = device.createTexture({
-    size: [canvas.width, canvas.height],
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-    format: 'rgba16float',
-  });
-  const gBufferTextureAlbedo = device.createTexture({
-    size: [canvas.width, canvas.height],
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-    format: 'bgra8unorm',
-  });
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-  });
-
-  const gBufferTextureViews = [
-    gBufferTexture2DFloat16.createView(),
-    gBufferTextureAlbedo.createView(),
-    depthTexture.createView(),
-  ];
-
-  const vertexBuffers: Iterable = [
-    {
-      arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,
-      attributes: [
-        {
-          // position
-          shaderLocation: 0,
-          offset: 0,
-          format: 'float32x3',
-        },
-        {
-          // normal
-          shaderLocation: 1,
-          offset: Float32Array.BYTES_PER_ELEMENT * 3,
-          format: 'float32x3',
-        },
-        {
-          // uv
-          shaderLocation: 2,
-          offset: Float32Array.BYTES_PER_ELEMENT * 6,
-          format: 'float32x2',
-        },
-      ],
-    },
-  ];
-
-  const primitive: GPUPrimitiveState = {
-    topology: 'triangle-list',
-    cullMode: 'back',
-  };
-
-  const writeGBuffersPipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexWriteGBuffers,
-      }),
-      entryPoint: 'main',
-      buffers: vertexBuffers,
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentWriteGBuffers,
-      }),
-      entryPoint: 'main',
-      targets: [
-        // normal
-        { format: 'rgba16float' },
-        // albedo
-        { format: 'bgra8unorm' },
-      ],
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-    primitive,
-  });
-
-  const gBufferTexturesBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.FRAGMENT,
-        texture: {
-          sampleType: 'unfilterable-float',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.FRAGMENT,
-        texture: {
-          sampleType: 'unfilterable-float',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.FRAGMENT,
-        texture: {
-          sampleType: 'depth',
-        },
-      },
-    ],
-  });
-
-  const lightsBufferBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
-        buffer: {
-          type: 'read-only-storage',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-    ],
-  });
-
-  const gBuffersDebugViewPipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [gBufferTexturesBindGroupLayout],
-    }),
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexTextureQuad,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentGBuffersDebugView,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-      constants: {
-        canvasSizeWidth: canvas.width,
-        canvasSizeHeight: canvas.height,
-      },
-    },
-    primitive,
-  });
-
-  const deferredRenderPipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [
-        gBufferTexturesBindGroupLayout,
-        lightsBufferBindGroupLayout,
-      ],
-    }),
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexTextureQuad,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentDeferredRendering,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive,
-  });
-
-  const writeGBufferPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: gBufferTextureViews[0],
-
-        clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-      {
-        view: gBufferTextureViews[1],
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const textureQuadPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // view is acquired and set in render loop.
-        view: undefined,
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-  };
-
-  const settings = {
-    mode: 'rendering',
-    numLights: 128,
-  };
-  const configUniformBuffer = (() => {
-    const buffer = device.createBuffer({
-      size: Uint32Array.BYTES_PER_ELEMENT,
-      mappedAtCreation: true,
-      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-    });
-    new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;
-    buffer.unmap();
-    return buffer;
-  })();
-
-  gui.add(settings, 'mode', ['rendering', 'gBuffers view']);
-  gui
-    .add(settings, 'numLights', 1, kMaxNumLights)
-    .step(1)
-    .onChange(() => {
-      device.queue.writeBuffer(
-        configUniformBuffer,
-        0,
-        new Uint32Array([settings.numLights])
-      );
-    });
-
-  const modelUniformBuffer = device.createBuffer({
-    size: 4 * 16 * 2, // two 4x4 matrix
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const cameraUniformBuffer = device.createBuffer({
-    size: 4 * 16 * 2, // two 4x4 matrix
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const sceneUniformBindGroup = device.createBindGroup({
-    layout: writeGBuffersPipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: modelUniformBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: {
-          buffer: cameraUniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const gBufferTexturesBindGroup = device.createBindGroup({
-    layout: gBufferTexturesBindGroupLayout,
-    entries: [
-      {
-        binding: 0,
-        resource: gBufferTextureViews[0],
-      },
-      {
-        binding: 1,
-        resource: gBufferTextureViews[1],
-      },
-      {
-        binding: 2,
-        resource: gBufferTextureViews[2],
-      },
-    ],
-  });
-
-  // Lights data are uploaded in a storage buffer
-  // which could be updated/culled/etc. with a compute shader
-  const extent = vec3.sub(lightExtentMax, lightExtentMin);
-  const lightDataStride = 8;
-  const bufferSizeInByte =
-    Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;
-  const lightsBuffer = device.createBuffer({
-    size: bufferSizeInByte,
-    usage: GPUBufferUsage.STORAGE,
-    mappedAtCreation: true,
-  });
-
-  // We randomaly populate lights randomly in a box range
-  // And simply move them along y-axis per frame to show they are
-  // dynamic lightings
-  const lightData = new Float32Array(lightsBuffer.getMappedRange());
-  const tmpVec4 = vec4.create();
-  let offset = 0;
-  for (let i = 0; i < kMaxNumLights; i++) {
-    offset = lightDataStride * i;
-    // position
-    for (let i = 0; i < 3; i++) {
-      tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];
-    }
-    tmpVec4[3] = 1;
-    lightData.set(tmpVec4, offset);
-    // color
-    tmpVec4[0] = Math.random() * 2;
-    tmpVec4[1] = Math.random() * 2;
-    tmpVec4[2] = Math.random() * 2;
-    // radius
-    tmpVec4[3] = 20.0;
-    lightData.set(tmpVec4, offset + 4);
-  }
-  lightsBuffer.unmap();
-
-  const lightExtentBuffer = device.createBuffer({
-    size: 4 * 8,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-  const lightExtentData = new Float32Array(8);
-  lightExtentData.set(lightExtentMin, 0);
-  lightExtentData.set(lightExtentMax, 4);
-  device.queue.writeBuffer(
-    lightExtentBuffer,
-    0,
-    lightExtentData.buffer,
-    lightExtentData.byteOffset,
-    lightExtentData.byteLength
-  );
-
-  const lightUpdateComputePipeline = device.createComputePipeline({
-    layout: 'auto',
-    compute: {
-      module: device.createShaderModule({
-        code: lightUpdate,
-      }),
-      entryPoint: 'main',
-    },
-  });
-  const lightsBufferBindGroup = device.createBindGroup({
-    layout: lightsBufferBindGroupLayout,
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: lightsBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: {
-          buffer: configUniformBuffer,
-        },
-      },
-      {
-        binding: 2,
-        resource: {
-          buffer: cameraUniformBuffer,
-        },
-      },
-    ],
-  });
-  const lightsBufferComputeBindGroup = device.createBindGroup({
-    layout: lightUpdateComputePipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: lightsBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: {
-          buffer: configUniformBuffer,
-        },
-      },
-      {
-        binding: 2,
-        resource: {
-          buffer: lightExtentBuffer,
-        },
-      },
-    ],
-  });
-  //--------------------
-
-  // Scene matrices
-  const eyePosition = vec3.fromValues(0, 50, -100);
-  const upVector = vec3.fromValues(0, 1, 0);
-  const origin = vec3.fromValues(0, 0, 0);
-
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    2000.0
-  );
-
-  // Move the model so it's centered.
-  const modelMatrix = mat4.translation([0, -45, 0]);
-
-  const modelData = modelMatrix as Float32Array;
-  device.queue.writeBuffer(
-    modelUniformBuffer,
-    0,
-    modelData.buffer,
-    modelData.byteOffset,
-    modelData.byteLength
-  );
-  const invertTransposeModelMatrix = mat4.invert(modelMatrix);
-  mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);
-  const normalModelData = invertTransposeModelMatrix as Float32Array;
-  device.queue.writeBuffer(
-    modelUniformBuffer,
-    64,
-    normalModelData.buffer,
-    normalModelData.byteOffset,
-    normalModelData.byteLength
-  );
-
-  // Rotates the camera around the origin based on time.
-  function getCameraViewProjMatrix() {
-    const rad = Math.PI * (Date.now() / 5000);
-    const rotation = mat4.rotateY(mat4.translation(origin), rad);
-    const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);
-
-    const viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);
-
-    return mat4.multiply(projectionMatrix, viewMatrix) as Float32Array;
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const cameraViewProj = getCameraViewProjMatrix();
-    device.queue.writeBuffer(
-      cameraUniformBuffer,
-      0,
-      cameraViewProj.buffer,
-      cameraViewProj.byteOffset,
-      cameraViewProj.byteLength
-    );
-    const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;
-    device.queue.writeBuffer(
-      cameraUniformBuffer,
-      64,
-      cameraInvViewProj.buffer,
-      cameraInvViewProj.byteOffset,
-      cameraInvViewProj.byteLength
-    );
-
-    const commandEncoder = device.createCommandEncoder();
-    {
-      // Write position, normal, albedo etc. data to gBuffers
-      const gBufferPass = commandEncoder.beginRenderPass(
-        writeGBufferPassDescriptor
-      );
-      gBufferPass.setPipeline(writeGBuffersPipeline);
-      gBufferPass.setBindGroup(0, sceneUniformBindGroup);
-      gBufferPass.setVertexBuffer(0, vertexBuffer);
-      gBufferPass.setIndexBuffer(indexBuffer, 'uint16');
-      gBufferPass.drawIndexed(indexCount);
-      gBufferPass.end();
-    }
-    {
-      // Update lights position
-      const lightPass = commandEncoder.beginComputePass();
-      lightPass.setPipeline(lightUpdateComputePipeline);
-      lightPass.setBindGroup(0, lightsBufferComputeBindGroup);
-      lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));
-      lightPass.end();
-    }
-    {
-      if (settings.mode === 'gBuffers view') {
-        // GBuffers debug view
-        // Left: depth
-        // Middle: normal
-        // Right: albedo (use uv to mimic a checkerboard texture)
-        textureQuadPassDescriptor.colorAttachments[0].view = context
-          .getCurrentTexture()
-          .createView();
-        const debugViewPass = commandEncoder.beginRenderPass(
-          textureQuadPassDescriptor
-        );
-        debugViewPass.setPipeline(gBuffersDebugViewPipeline);
-        debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);
-        debugViewPass.draw(6);
-        debugViewPass.end();
-      } else {
-        // Deferred rendering
-        textureQuadPassDescriptor.colorAttachments[0].view = context
-          .getCurrentTexture()
-          .createView();
-        const deferredRenderingPass = commandEncoder.beginRenderPass(
-          textureQuadPassDescriptor
-        );
-        deferredRenderingPass.setPipeline(deferredRenderPipeline);
-        deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);
-        deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);
-        deferredRenderingPass.draw(6);
-        deferredRenderingPass.end();
-      }
-    }
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const DeferredRendering: () => JSX.Element = () =>
-  makeSample({
-    name: 'Deferred Rendering',
-    description: `This example shows how to do deferred rendering with webgpu.
-      Render geometry info to multiple targets in the gBuffers in the first pass.
-      In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.
-      And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.
-      World-space positions are reconstructed from the depth texture and camera matrix.
-      We also update light position in a compute shader, where further operations like tile/cluster culling could happen.
-      The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer
-      in the middle, and the albedo G-buffer on the right side of the screen.
-      `,
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: 'vertexWriteGBuffers.wgsl',
-        contents: vertexWriteGBuffers,
-        editable: true,
-      },
-      {
-        name: 'fragmentWriteGBuffers.wgsl',
-        contents: fragmentWriteGBuffers,
-        editable: true,
-      },
-      {
-        name: 'vertexTextureQuad.wgsl',
-        contents: vertexTextureQuad,
-        editable: true,
-      },
-      {
-        name: 'fragmentGBuffersDebugView.wgsl',
-        contents: fragmentGBuffersDebugView,
-        editable: true,
-      },
-      {
-        name: 'fragmentDeferredRendering.wgsl',
-        contents: fragmentDeferredRendering,
-        editable: true,
-      },
-      {
-        name: 'lightUpdate.wgsl',
-        contents: lightUpdate,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default DeferredRendering;
diff --git a/src/sample/fractalCube/main.ts b/src/sample/fractalCube/main.ts
deleted file mode 100644
index 3abc1149..00000000
--- a/src/sample/fractalCube/main.ts
+++ /dev/null
@@ -1,266 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import basicVertWGSL from '../../shaders/basic.vert.wgsl';
-import sampleSelfWGSL from './sampleSelf.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-
-    // Specify we want both RENDER_ATTACHMENT and COPY_SRC since we
-    // will copy out of the swapchain texture.
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: basicVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: sampleSelfWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the cube is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  // We will copy the frame's rendering results into this texture and
-  // sample it on the next frame.
-  const cubeTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: presentationFormat,
-    usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
-  });
-
-  // Create a sampler with linear filtering for smooth interpolation.
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: sampler,
-      },
-      {
-        binding: 2,
-        resource: cubeTexture.createView(),
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-  const modelViewProjectionMatrix = mat4.create();
-
-  function getTransformationMatrix() {
-    const viewMatrix = mat4.identity();
-    mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);
-    const now = Date.now() / 1000;
-    mat4.rotate(
-      viewMatrix,
-      vec3.fromValues(Math.sin(now), Math.cos(now), 0),
-      1,
-      viewMatrix
-    );
-
-    mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
-
-    return modelViewProjectionMatrix as Float32Array;
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const transformationMatrix = getTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      transformationMatrix.buffer,
-      transformationMatrix.byteOffset,
-      transformationMatrix.byteLength
-    );
-
-    const swapChainTexture = context.getCurrentTexture();
-    // prettier-ignore
-    renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.draw(cubeVertexCount);
-    passEncoder.end();
-
-    // Copy the rendering results from the swapchain into |cubeTexture|.
-    commandEncoder.copyTextureToTexture(
-      {
-        texture: swapChainTexture,
-      },
-      {
-        texture: cubeTexture,
-      },
-      [canvas.width, canvas.height]
-    );
-
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const FractalCube: () => JSX.Element = () =>
-  makeSample({
-    name: 'Fractal Cube',
-    description:
-      "This example uses the previous frame's rendering result \
-       as the source texture for the next frame.",
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        contents: basicVertWGSL,
-        editable: true,
-      },
-      {
-        name: './sampleSelf.frag.wgsl',
-        contents: sampleSelfWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default FractalCube;
diff --git a/src/sample/gameOfLife/main.ts b/src/sample/gameOfLife/main.ts
deleted file mode 100644
index f772b46f..00000000
--- a/src/sample/gameOfLife/main.ts
+++ /dev/null
@@ -1,305 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import computeWGSL from './compute.wgsl';
-import vertWGSL from './vert.wgsl';
-import fragWGSL from './frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const GameOptions = {
-    width: 128,
-    height: 128,
-    timestep: 4,
-    workgroupSize: 8,
-  };
-
-  const computeShader = device.createShaderModule({ code: computeWGSL });
-  const bindGroupLayoutCompute = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.COMPUTE,
-        buffer: {
-          type: 'read-only-storage',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.COMPUTE,
-        buffer: {
-          type: 'read-only-storage',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.COMPUTE,
-        buffer: {
-          type: 'storage',
-        },
-      },
-    ],
-  });
-
-  const squareVertices = new Uint32Array([0, 0, 0, 1, 1, 0, 1, 1]);
-  const squareBuffer = device.createBuffer({
-    size: squareVertices.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Uint32Array(squareBuffer.getMappedRange()).set(squareVertices);
-  squareBuffer.unmap();
-
-  const squareStride: GPUVertexBufferLayout = {
-    arrayStride: 2 * squareVertices.BYTES_PER_ELEMENT,
-    stepMode: 'vertex',
-    attributes: [
-      {
-        shaderLocation: 1,
-        offset: 0,
-        format: 'uint32x2',
-      },
-    ],
-  };
-
-  const vertexShader = device.createShaderModule({ code: vertWGSL });
-  const fragmentShader = device.createShaderModule({ code: fragWGSL });
-  let commandEncoder: GPUCommandEncoder;
-
-  const bindGroupLayoutRender = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-    ],
-  });
-
-  const cellsStride: GPUVertexBufferLayout = {
-    arrayStride: Uint32Array.BYTES_PER_ELEMENT,
-    stepMode: 'instance',
-    attributes: [
-      {
-        shaderLocation: 0,
-        offset: 0,
-        format: 'uint32',
-      },
-    ],
-  };
-
-  function addGUI() {
-    gui.add(GameOptions, 'timestep', 1, 60, 1);
-    gui.add(GameOptions, 'width', 16, 1024, 16).onFinishChange(resetGameData);
-    gui.add(GameOptions, 'height', 16, 1024, 16).onFinishChange(resetGameData);
-    gui
-      .add(GameOptions, 'workgroupSize', [4, 8, 16])
-      .onFinishChange(resetGameData);
-  }
-
-  let wholeTime = 0,
-    loopTimes = 0,
-    buffer0: GPUBuffer,
-    buffer1: GPUBuffer;
-  let render: () => void;
-  function resetGameData() {
-    // compute pipeline
-    const computePipeline = device.createComputePipeline({
-      layout: device.createPipelineLayout({
-        bindGroupLayouts: [bindGroupLayoutCompute],
-      }),
-      compute: {
-        module: computeShader,
-        entryPoint: 'main',
-        constants: {
-          blockSize: GameOptions.workgroupSize,
-        },
-      },
-    });
-    const sizeBuffer = device.createBuffer({
-      size: 2 * Uint32Array.BYTES_PER_ELEMENT,
-      usage:
-        GPUBufferUsage.STORAGE |
-        GPUBufferUsage.UNIFORM |
-        GPUBufferUsage.COPY_DST |
-        GPUBufferUsage.VERTEX,
-      mappedAtCreation: true,
-    });
-    new Uint32Array(sizeBuffer.getMappedRange()).set([
-      GameOptions.width,
-      GameOptions.height,
-    ]);
-    sizeBuffer.unmap();
-    const length = GameOptions.width * GameOptions.height;
-    const cells = new Uint32Array(length);
-    for (let i = 0; i < length; i++) {
-      cells[i] = Math.random() < 0.25 ? 1 : 0;
-    }
-
-    buffer0 = device.createBuffer({
-      size: cells.byteLength,
-      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.VERTEX,
-      mappedAtCreation: true,
-    });
-    new Uint32Array(buffer0.getMappedRange()).set(cells);
-    buffer0.unmap();
-
-    buffer1 = device.createBuffer({
-      size: cells.byteLength,
-      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.VERTEX,
-    });
-
-    const bindGroup0 = device.createBindGroup({
-      layout: bindGroupLayoutCompute,
-      entries: [
-        { binding: 0, resource: { buffer: sizeBuffer } },
-        { binding: 1, resource: { buffer: buffer0 } },
-        { binding: 2, resource: { buffer: buffer1 } },
-      ],
-    });
-
-    const bindGroup1 = device.createBindGroup({
-      layout: bindGroupLayoutCompute,
-      entries: [
-        { binding: 0, resource: { buffer: sizeBuffer } },
-        { binding: 1, resource: { buffer: buffer1 } },
-        { binding: 2, resource: { buffer: buffer0 } },
-      ],
-    });
-
-    const renderPipeline = device.createRenderPipeline({
-      layout: device.createPipelineLayout({
-        bindGroupLayouts: [bindGroupLayoutRender],
-      }),
-      primitive: {
-        topology: 'triangle-strip',
-      },
-      vertex: {
-        module: vertexShader,
-        entryPoint: 'main',
-        buffers: [cellsStride, squareStride],
-      },
-      fragment: {
-        module: fragmentShader,
-        entryPoint: 'main',
-        targets: [
-          {
-            format: presentationFormat,
-          },
-        ],
-      },
-    });
-
-    const uniformBindGroup = device.createBindGroup({
-      layout: renderPipeline.getBindGroupLayout(0),
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: sizeBuffer,
-            offset: 0,
-            size: 2 * Uint32Array.BYTES_PER_ELEMENT,
-          },
-        },
-      ],
-    });
-
-    loopTimes = 0;
-    render = () => {
-      const view = context.getCurrentTexture().createView();
-      const renderPass: GPURenderPassDescriptor = {
-        colorAttachments: [
-          {
-            view,
-            loadOp: 'clear',
-            storeOp: 'store',
-          },
-        ],
-      };
-      commandEncoder = device.createCommandEncoder();
-
-      // compute
-      const passEncoderCompute = commandEncoder.beginComputePass();
-      passEncoderCompute.setPipeline(computePipeline);
-      passEncoderCompute.setBindGroup(0, loopTimes ? bindGroup1 : bindGroup0);
-      passEncoderCompute.dispatchWorkgroups(
-        GameOptions.width / GameOptions.workgroupSize,
-        GameOptions.height / GameOptions.workgroupSize
-      );
-      passEncoderCompute.end();
-      // render
-      const passEncoderRender = commandEncoder.beginRenderPass(renderPass);
-      passEncoderRender.setPipeline(renderPipeline);
-      passEncoderRender.setVertexBuffer(0, loopTimes ? buffer1 : buffer0);
-      passEncoderRender.setVertexBuffer(1, squareBuffer);
-      passEncoderRender.setBindGroup(0, uniformBindGroup);
-      passEncoderRender.draw(4, length);
-      passEncoderRender.end();
-
-      device.queue.submit([commandEncoder.finish()]);
-    };
-  }
-
-  addGUI();
-  resetGameData();
-
-  (function loop() {
-    if (GameOptions.timestep) {
-      wholeTime++;
-      if (wholeTime >= GameOptions.timestep) {
-        render();
-        wholeTime -= GameOptions.timestep;
-        loopTimes = 1 - loopTimes;
-      }
-    }
-
-    requestAnimationFrame(loop);
-  })();
-};
-
-const GameOfLife: () => JSX.Element = () =>
-  makeSample({
-    name: "Conway's Game of Life",
-    description:
-      "This example shows how to make Conway's game of life. First, use compute shader to calculate how cells grow or die. Then use render pipeline to draw cells by using instance mesh.",
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './gameOfLife.compute.wgsl',
-        contents: computeWGSL,
-        editable: true,
-      },
-      {
-        name: './gameOfLife.vert.wgsl',
-        contents: vertWGSL,
-        editable: true,
-      },
-      {
-        name: './gameOfLife.frag.wgsl',
-        contents: fragWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default GameOfLife;
diff --git a/src/sample/helloTriangle/main.ts b/src/sample/helloTriangle/main.ts
deleted file mode 100644
index 0330b4df..00000000
--- a/src/sample/helloTriangle/main.ts
+++ /dev/null
@@ -1,102 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import triangleVertWGSL from '../../shaders/triangle.vert.wgsl';
-import redFragWGSL from '../../shaders/red.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: triangleVertWGSL,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: redFragWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-  });
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const commandEncoder = device.createCommandEncoder();
-    const textureView = context.getCurrentTexture().createView();
-
-    const renderPassDescriptor: GPURenderPassDescriptor = {
-      colorAttachments: [
-        {
-          view: textureView,
-          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-          loadOp: 'clear',
-          storeOp: 'store',
-        },
-      ],
-    };
-
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.draw(3);
-    passEncoder.end();
-
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-const HelloTriangle: () => JSX.Element = () =>
-  makeSample({
-    name: 'Hello Triangle',
-    description: 'Shows rendering a basic triangle.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/triangle.vert.wgsl',
-        contents: triangleVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/red.frag.wgsl',
-        contents: redFragWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default HelloTriangle;
diff --git a/src/sample/helloTriangleMSAA/main.ts b/src/sample/helloTriangleMSAA/main.ts
deleted file mode 100644
index 9b663118..00000000
--- a/src/sample/helloTriangleMSAA/main.ts
+++ /dev/null
@@ -1,115 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import triangleVertWGSL from '../../shaders/triangle.vert.wgsl';
-import redFragWGSL from '../../shaders/red.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const sampleCount = 4;
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: triangleVertWGSL,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: redFragWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-    multisample: {
-      count: 4,
-    },
-  });
-
-  const texture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    sampleCount,
-    format: presentationFormat,
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-  const view = texture.createView();
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const commandEncoder = device.createCommandEncoder();
-
-    const renderPassDescriptor: GPURenderPassDescriptor = {
-      colorAttachments: [
-        {
-          view,
-          resolveTarget: context.getCurrentTexture().createView(),
-          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-          loadOp: 'clear',
-          storeOp: 'discard',
-        },
-      ],
-    };
-
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.draw(3);
-    passEncoder.end();
-
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-const HelloTriangleMSAA: () => JSX.Element = () =>
-  makeSample({
-    name: 'Hello Triangle MSAA',
-    description: 'Shows multisampled rendering a basic triangle.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/triangle.vert.wgsl',
-        contents: triangleVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/red.frag.wgsl',
-        contents: redFragWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default HelloTriangleMSAA;
diff --git a/src/sample/imageBlur/main.ts b/src/sample/imageBlur/main.ts
deleted file mode 100644
index 4331e717..00000000
--- a/src/sample/imageBlur/main.ts
+++ /dev/null
@@ -1,321 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import blurWGSL from './blur.wgsl';
-import fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';
-
-// Contants from the blur.wgsl shader.
-const tileDim = 128;
-const batch = [4, 4];
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const blurPipeline = device.createComputePipeline({
-    layout: 'auto',
-    compute: {
-      module: device.createShaderModule({
-        code: blurWGSL,
-      }),
-      entryPoint: 'main',
-    },
-  });
-
-  const fullscreenQuadPipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: fullscreenTexturedQuadWGSL,
-      }),
-      entryPoint: 'vert_main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fullscreenTexturedQuadWGSL,
-      }),
-      entryPoint: 'frag_main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-  });
-
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const response = await fetch('../assets/img/Di-3d.png');
-  const imageBitmap = await createImageBitmap(await response.blob());
-
-  const [srcWidth, srcHeight] = [imageBitmap.width, imageBitmap.height];
-  const cubeTexture = device.createTexture({
-    size: [srcWidth, srcHeight, 1],
-    format: 'rgba8unorm',
-    usage:
-      GPUTextureUsage.TEXTURE_BINDING |
-      GPUTextureUsage.COPY_DST |
-      GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-  device.queue.copyExternalImageToTexture(
-    { source: imageBitmap },
-    { texture: cubeTexture },
-    [imageBitmap.width, imageBitmap.height]
-  );
-
-  const textures = [0, 1].map(() => {
-    return device.createTexture({
-      size: {
-        width: srcWidth,
-        height: srcHeight,
-      },
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.STORAGE_BINDING |
-        GPUTextureUsage.TEXTURE_BINDING,
-    });
-  });
-
-  const buffer0 = (() => {
-    const buffer = device.createBuffer({
-      size: 4,
-      mappedAtCreation: true,
-      usage: GPUBufferUsage.UNIFORM,
-    });
-    new Uint32Array(buffer.getMappedRange())[0] = 0;
-    buffer.unmap();
-    return buffer;
-  })();
-
-  const buffer1 = (() => {
-    const buffer = device.createBuffer({
-      size: 4,
-      mappedAtCreation: true,
-      usage: GPUBufferUsage.UNIFORM,
-    });
-    new Uint32Array(buffer.getMappedRange())[0] = 1;
-    buffer.unmap();
-    return buffer;
-  })();
-
-  const blurParamsBuffer = device.createBuffer({
-    size: 8,
-    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
-  });
-
-  const computeConstants = device.createBindGroup({
-    layout: blurPipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: sampler,
-      },
-      {
-        binding: 1,
-        resource: {
-          buffer: blurParamsBuffer,
-        },
-      },
-    ],
-  });
-
-  const computeBindGroup0 = device.createBindGroup({
-    layout: blurPipeline.getBindGroupLayout(1),
-    entries: [
-      {
-        binding: 1,
-        resource: cubeTexture.createView(),
-      },
-      {
-        binding: 2,
-        resource: textures[0].createView(),
-      },
-      {
-        binding: 3,
-        resource: {
-          buffer: buffer0,
-        },
-      },
-    ],
-  });
-
-  const computeBindGroup1 = device.createBindGroup({
-    layout: blurPipeline.getBindGroupLayout(1),
-    entries: [
-      {
-        binding: 1,
-        resource: textures[0].createView(),
-      },
-      {
-        binding: 2,
-        resource: textures[1].createView(),
-      },
-      {
-        binding: 3,
-        resource: {
-          buffer: buffer1,
-        },
-      },
-    ],
-  });
-
-  const computeBindGroup2 = device.createBindGroup({
-    layout: blurPipeline.getBindGroupLayout(1),
-    entries: [
-      {
-        binding: 1,
-        resource: textures[1].createView(),
-      },
-      {
-        binding: 2,
-        resource: textures[0].createView(),
-      },
-      {
-        binding: 3,
-        resource: {
-          buffer: buffer0,
-        },
-      },
-    ],
-  });
-
-  const showResultBindGroup = device.createBindGroup({
-    layout: fullscreenQuadPipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: sampler,
-      },
-      {
-        binding: 1,
-        resource: textures[1].createView(),
-      },
-    ],
-  });
-
-  const settings = {
-    filterSize: 15,
-    iterations: 2,
-  };
-
-  let blockDim: number;
-  const updateSettings = () => {
-    blockDim = tileDim - (settings.filterSize - 1);
-    device.queue.writeBuffer(
-      blurParamsBuffer,
-      0,
-      new Uint32Array([settings.filterSize, blockDim])
-    );
-  };
-  gui.add(settings, 'filterSize', 1, 33).step(2).onChange(updateSettings);
-  gui.add(settings, 'iterations', 1, 10).step(1);
-
-  updateSettings();
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const commandEncoder = device.createCommandEncoder();
-
-    const computePass = commandEncoder.beginComputePass();
-    computePass.setPipeline(blurPipeline);
-    computePass.setBindGroup(0, computeConstants);
-
-    computePass.setBindGroup(1, computeBindGroup0);
-    computePass.dispatchWorkgroups(
-      Math.ceil(srcWidth / blockDim),
-      Math.ceil(srcHeight / batch[1])
-    );
-
-    computePass.setBindGroup(1, computeBindGroup1);
-    computePass.dispatchWorkgroups(
-      Math.ceil(srcHeight / blockDim),
-      Math.ceil(srcWidth / batch[1])
-    );
-
-    for (let i = 0; i < settings.iterations - 1; ++i) {
-      computePass.setBindGroup(1, computeBindGroup2);
-      computePass.dispatchWorkgroups(
-        Math.ceil(srcWidth / blockDim),
-        Math.ceil(srcHeight / batch[1])
-      );
-
-      computePass.setBindGroup(1, computeBindGroup1);
-      computePass.dispatchWorkgroups(
-        Math.ceil(srcHeight / blockDim),
-        Math.ceil(srcWidth / batch[1])
-      );
-    }
-
-    computePass.end();
-
-    const passEncoder = commandEncoder.beginRenderPass({
-      colorAttachments: [
-        {
-          view: context.getCurrentTexture().createView(),
-          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-          loadOp: 'clear',
-          storeOp: 'store',
-        },
-      ],
-    });
-
-    passEncoder.setPipeline(fullscreenQuadPipeline);
-    passEncoder.setBindGroup(0, showResultBindGroup);
-    passEncoder.draw(6);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const ImageBlur: () => JSX.Element = () =>
-  makeSample({
-    name: 'Image Blur',
-    description:
-      'This example shows how to blur an image using a WebGPU compute shader.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './blur.wgsl',
-        contents: blurWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/fullscreenTexturedQuad.wgsl',
-        contents: fullscreenTexturedQuadWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ImageBlur;
diff --git a/src/sample/instancedCube/main.ts b/src/sample/instancedCube/main.ts
deleted file mode 100644
index baec3e39..00000000
--- a/src/sample/instancedCube/main.ts
+++ /dev/null
@@ -1,273 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import instancedVertWGSL from './instanced.vert.wgsl';
-import vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: instancedVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: vertexPositionColorWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the cube is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const xCount = 4;
-  const yCount = 4;
-  const numInstances = xCount * yCount;
-  const matrixFloatCount = 16; // 4x4 matrix
-  const matrixSize = 4 * matrixFloatCount;
-  const uniformBufferSize = numInstances * matrixSize;
-
-  // Allocate a buffer large enough to hold transforms for every
-  // instance.
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-
-  type Mat4 = mat4.default;
-  const modelMatrices = new Array(numInstances);
-  const mvpMatricesData = new Float32Array(matrixFloatCount * numInstances);
-
-  const step = 4.0;
-
-  // Initialize the matrix data for every instance.
-  let m = 0;
-  for (let x = 0; x < xCount; x++) {
-    for (let y = 0; y < yCount; y++) {
-      modelMatrices[m] = mat4.translation(
-        vec3.fromValues(
-          step * (x - xCount / 2 + 0.5),
-          step * (y - yCount / 2 + 0.5),
-          0
-        )
-      );
-      m++;
-    }
-  }
-
-  const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -12));
-
-  const tmpMat4 = mat4.create();
-
-  // Update the transformation matrix data for each instance.
-  function updateTransformationMatrix() {
-    const now = Date.now() / 1000;
-
-    let m = 0,
-      i = 0;
-    for (let x = 0; x < xCount; x++) {
-      for (let y = 0; y < yCount; y++) {
-        mat4.rotate(
-          modelMatrices[i],
-          vec3.fromValues(
-            Math.sin((x + 0.5) * now),
-            Math.cos((y + 0.5) * now),
-            0
-          ),
-          1,
-          tmpMat4
-        );
-
-        mat4.multiply(viewMatrix, tmpMat4, tmpMat4);
-        mat4.multiply(projectionMatrix, tmpMat4, tmpMat4);
-
-        mvpMatricesData.set(tmpMat4, m);
-
-        i++;
-        m += matrixFloatCount;
-      }
-    }
-  }
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    // Update the matrix data.
-    updateTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      mvpMatricesData.buffer,
-      mvpMatricesData.byteOffset,
-      mvpMatricesData.byteLength
-    );
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.draw(cubeVertexCount, numInstances, 0, 0);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const InstancedCube: () => JSX.Element = () =>
-  makeSample({
-    name: 'Instanced Cube',
-    description: 'This example shows the use of instancing.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/instanced.vert.wgsl',
-        contents: instancedVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/vertexPositionColor.frag.wgsl',
-        contents: vertexPositionColorWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default InstancedCube;
diff --git a/src/sample/normalMap/main.ts b/src/sample/normalMap/main.ts
deleted file mode 100644
index 78b2e9c1..00000000
--- a/src/sample/normalMap/main.ts
+++ /dev/null
@@ -1,427 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import normalMapWGSL from './normalMap.wgsl';
-import { createMeshRenderable } from '../../meshes/mesh';
-import { createBoxMeshWithTangents } from '../../meshes/box';
-import {
-  createBindGroupDescriptor,
-  create3DRenderPipeline,
-  createTextureFromImage,
-} from './utils';
-
-const MAT4X4_BYTES = 64;
-enum TextureAtlas {
-  Spiral,
-  Toybox,
-  BrickWall,
-}
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  interface GUISettings {
-    'Bump Mode':
-      | 'Albedo Texture'
-      | 'Normal Texture'
-      | 'Depth Texture'
-      | 'Normal Map'
-      | 'Parallax Scale'
-      | 'Steep Parallax';
-    cameraPosX: number;
-    cameraPosY: number;
-    cameraPosZ: number;
-    lightPosX: number;
-    lightPosY: number;
-    lightPosZ: number;
-    lightIntensity: number;
-    depthScale: number;
-    depthLayers: number;
-    Texture: string;
-    'Reset Light': () => void;
-  }
-
-  const settings: GUISettings = {
-    'Bump Mode': 'Normal Map',
-    cameraPosX: 0.0,
-    cameraPosY: 0.8,
-    cameraPosZ: -1.4,
-    lightPosX: 1.7,
-    lightPosY: 0.7,
-    lightPosZ: -1.9,
-    lightIntensity: 5.0,
-    depthScale: 0.05,
-    depthLayers: 16,
-    Texture: 'Spiral',
-    'Reset Light': () => {
-      return;
-    },
-  };
-
-  // Create normal mapping resources and pipeline
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const spaceTransformsBuffer = device.createBuffer({
-    // Buffer holding projection, view, and model matrices plus padding bytes
-    size: MAT4X4_BYTES * 4,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const mapInfoBuffer = device.createBuffer({
-    // Buffer holding mapping type, light uniforms, and depth uniforms
-    size: Float32Array.BYTES_PER_ELEMENT * 8,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-  const mapInfoArray = new ArrayBuffer(mapInfoBuffer.size);
-  const mapInfoView = new DataView(mapInfoArray, 0, mapInfoArray.byteLength);
-
-  // Fetch the image and upload it into a GPUTexture.
-  let woodAlbedoTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/wood_albedo.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    woodAlbedoTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let spiralNormalTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/spiral_normal.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    spiralNormalTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let spiralHeightTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/spiral_height.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    spiralHeightTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let toyboxNormalTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/toybox_normal.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    toyboxNormalTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let toyboxHeightTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/toybox_height.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    toyboxHeightTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let brickwallAlbedoTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/brickwall_albedo.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    brickwallAlbedoTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let brickwallNormalTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/brickwall_normal.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    brickwallNormalTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  let brickwallHeightTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/brickwall_height.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-    brickwallHeightTexture = createTextureFromImage(device, imageBitmap);
-  }
-
-  // Create a sampler with linear filtering for smooth interpolation.
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const box = createMeshRenderable(
-    device,
-    createBoxMeshWithTangents(1.0, 1.0, 1.0)
-  );
-
-  // Uniform bindGroups and bindGroupLayout
-  const frameBGDescriptor = createBindGroupDescriptor(
-    [0, 1],
-    [
-      GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-      GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,
-    ],
-    ['buffer', 'buffer'],
-    [{ type: 'uniform' }, { type: 'uniform' }],
-    [[{ buffer: spaceTransformsBuffer }, { buffer: mapInfoBuffer }]],
-    'Frame',
-    device
-  );
-
-  // Texture bindGroups and bindGroupLayout
-  const surfaceBGDescriptor = createBindGroupDescriptor(
-    [0, 1, 2, 3],
-    [GPUShaderStage.FRAGMENT],
-    ['sampler', 'texture', 'texture', 'texture'],
-    [
-      { type: 'filtering' },
-      { sampleType: 'float' },
-      { sampleType: 'float' },
-      { sampleType: 'float' },
-    ],
-    // Multiple bindgroups that accord to the layout defined above
-    [
-      [
-        sampler,
-        woodAlbedoTexture.createView(),
-        spiralNormalTexture.createView(),
-        spiralHeightTexture.createView(),
-      ],
-      [
-        sampler,
-        woodAlbedoTexture.createView(),
-        toyboxNormalTexture.createView(),
-        toyboxHeightTexture.createView(),
-      ],
-      [
-        sampler,
-        brickwallAlbedoTexture.createView(),
-        brickwallNormalTexture.createView(),
-        brickwallHeightTexture.createView(),
-      ],
-    ],
-    'Surface',
-    device
-  );
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    0.1,
-    10.0
-  ) as Float32Array;
-
-  function getViewMatrix() {
-    return mat4.lookAt(
-      [settings.cameraPosX, settings.cameraPosY, settings.cameraPosZ],
-      [0, 0, 0],
-      [0, 1, 0]
-    );
-  }
-
-  function getModelMatrix() {
-    const modelMatrix = mat4.create();
-    mat4.identity(modelMatrix);
-    const now = Date.now() / 1000;
-    mat4.rotateY(modelMatrix, now * -0.5, modelMatrix);
-    return modelMatrix;
-  }
-
-  // Change the model mapping type
-  const getMode = (): number => {
-    switch (settings['Bump Mode']) {
-      case 'Albedo Texture':
-        return 0;
-      case 'Normal Texture':
-        return 1;
-      case 'Depth Texture':
-        return 2;
-      case 'Normal Map':
-        return 3;
-      case 'Parallax Scale':
-        return 4;
-      case 'Steep Parallax':
-        return 5;
-    }
-  };
-
-  const texturedCubePipeline = create3DRenderPipeline(
-    device,
-    'NormalMappingRender',
-    [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],
-    normalMapWGSL,
-    // Position,   normal       uv           tangent      bitangent
-    ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],
-    normalMapWGSL,
-    presentationFormat,
-    true
-  );
-
-  let currentSurfaceBindGroup = 0;
-  const onChangeTexture = () => {
-    currentSurfaceBindGroup = TextureAtlas[settings.Texture];
-  };
-
-  gui.add(settings, 'Bump Mode', [
-    'Albedo Texture',
-    'Normal Texture',
-    'Depth Texture',
-    'Normal Map',
-    'Parallax Scale',
-    'Steep Parallax',
-  ]);
-  gui
-    .add(settings, 'Texture', ['Spiral', 'Toybox', 'BrickWall'])
-    .onChange(onChangeTexture);
-  const lightFolder = gui.addFolder('Light');
-  const depthFolder = gui.addFolder('Depth');
-  lightFolder.add(settings, 'Reset Light').onChange(() => {
-    lightPosXController.setValue(1.7);
-    lightPosYController.setValue(0.7);
-    lightPosZController.setValue(-1.9);
-    lightIntensityController.setValue(5.0);
-  });
-  const lightPosXController = lightFolder
-    .add(settings, 'lightPosX', -5, 5)
-    .step(0.1);
-  const lightPosYController = lightFolder
-    .add(settings, 'lightPosY', -5, 5)
-    .step(0.1);
-  const lightPosZController = lightFolder
-    .add(settings, 'lightPosZ', -5, 5)
-    .step(0.1);
-  const lightIntensityController = lightFolder
-    .add(settings, 'lightIntensity', 0.0, 10)
-    .step(0.1);
-  depthFolder.add(settings, 'depthScale', 0.0, 0.1).step(0.01);
-  depthFolder.add(settings, 'depthLayers', 1, 32).step(1);
-
-  function frame() {
-    if (!pageState.active) return;
-
-    // Update spaceTransformsBuffer
-    const viewMatrix = getViewMatrix();
-    const worldViewMatrix = mat4.mul(viewMatrix, getModelMatrix());
-    const worldViewProjMatrix = mat4.mul(projectionMatrix, worldViewMatrix);
-    const matrices = new Float32Array([
-      ...worldViewProjMatrix,
-      ...worldViewMatrix,
-    ]);
-
-    // Update mapInfoBuffer
-    const lightPosWS = vec3.create(
-      settings.lightPosX,
-      settings.lightPosY,
-      settings.lightPosZ
-    );
-    const lightPosVS = vec3.transformMat4(lightPosWS, viewMatrix);
-    const mode = getMode();
-    device.queue.writeBuffer(
-      spaceTransformsBuffer,
-      0,
-      matrices.buffer,
-      matrices.byteOffset,
-      matrices.byteLength
-    );
-
-    // struct MapInfo {
-    //   lightPosVS: vec3f,
-    //   mode: u32,
-    //   lightIntensity: f32,
-    //   depthScale: f32,
-    //   depthLayers: f32,
-    // }
-    mapInfoView.setFloat32(0, lightPosVS[0], true);
-    mapInfoView.setFloat32(4, lightPosVS[1], true);
-    mapInfoView.setFloat32(8, lightPosVS[2], true);
-    mapInfoView.setUint32(12, mode, true);
-    mapInfoView.setFloat32(16, settings.lightIntensity, true);
-    mapInfoView.setFloat32(20, settings.depthScale, true);
-    mapInfoView.setFloat32(24, settings.depthLayers, true);
-    device.queue.writeBuffer(mapInfoBuffer, 0, mapInfoArray);
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    // Draw textured Cube
-    passEncoder.setPipeline(texturedCubePipeline);
-    passEncoder.setBindGroup(0, frameBGDescriptor.bindGroups[0]);
-    passEncoder.setBindGroup(
-      1,
-      surfaceBGDescriptor.bindGroups[currentSurfaceBindGroup]
-    );
-    passEncoder.setVertexBuffer(0, box.vertexBuffer);
-    passEncoder.setIndexBuffer(box.indexBuffer, 'uint16');
-    passEncoder.drawIndexed(box.indexCount);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const NormalMapping: () => JSX.Element = () =>
-  makeSample({
-    name: 'Normal Mapping',
-    description:
-      'This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './normalMap.wgsl',
-        contents: normalMapWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/box.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/box.ts').default,
-      },
-      {
-        name: '../../meshes/mesh.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/mesh.ts').default,
-      },
-      {
-        name: './utils.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./utils.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default NormalMapping;
diff --git a/src/sample/particles/main.ts b/src/sample/particles/main.ts
deleted file mode 100644
index 8ca9d7fb..00000000
--- a/src/sample/particles/main.ts
+++ /dev/null
@@ -1,474 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import particleWGSL from './particle.wgsl';
-import probabilityMapWGSL from './probabilityMap.wgsl';
-
-const numParticles = 50000;
-const particlePositionOffset = 0;
-const particleColorOffset = 4 * 4;
-const particleInstanceByteSize =
-  3 * 4 + // position
-  1 * 4 + // lifetime
-  4 * 4 + // color
-  3 * 4 + // velocity
-  1 * 4 + // padding
-  0;
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const particlesBuffer = device.createBuffer({
-    size: numParticles * particleInstanceByteSize,
-    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE,
-  });
-
-  const renderPipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: particleWGSL,
-      }),
-      entryPoint: 'vs_main',
-      buffers: [
-        {
-          // instanced particles buffer
-          arrayStride: particleInstanceByteSize,
-          stepMode: 'instance',
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: particlePositionOffset,
-              format: 'float32x3',
-            },
-            {
-              // color
-              shaderLocation: 1,
-              offset: particleColorOffset,
-              format: 'float32x4',
-            },
-          ],
-        },
-        {
-          // quad vertex buffer
-          arrayStride: 2 * 4, // vec2
-          stepMode: 'vertex',
-          attributes: [
-            {
-              // vertex positions
-              shaderLocation: 2,
-              offset: 0,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: particleWGSL,
-      }),
-      entryPoint: 'fs_main',
-      targets: [
-        {
-          format: presentationFormat,
-          blend: {
-            color: {
-              srcFactor: 'src-alpha',
-              dstFactor: 'one',
-              operation: 'add',
-            },
-            alpha: {
-              srcFactor: 'zero',
-              dstFactor: 'one',
-              operation: 'add',
-            },
-          },
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-
-    depthStencil: {
-      depthWriteEnabled: false,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize =
-    4 * 4 * 4 + // modelViewProjectionMatrix : mat4x4
-    3 * 4 + // right : vec3
-    4 + // padding
-    3 * 4 + // up : vec3
-    4 + // padding
-    0;
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: renderPipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Quad vertex buffer
-  //////////////////////////////////////////////////////////////////////////////
-  const quadVertexBuffer = device.createBuffer({
-    size: 6 * 2 * 4, // 6x vec2
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  // prettier-ignore
-  const vertexData = [
-    -1.0, -1.0, +1.0, -1.0, -1.0, +1.0, -1.0, +1.0, +1.0, -1.0, +1.0, +1.0,
-  ];
-  new Float32Array(quadVertexBuffer.getMappedRange()).set(vertexData);
-  quadVertexBuffer.unmap();
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Texture
-  //////////////////////////////////////////////////////////////////////////////
-  let texture: GPUTexture;
-  let textureWidth = 1;
-  let textureHeight = 1;
-  let numMipLevels = 1;
-  {
-    const response = await fetch('../assets/img/webgpu.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-
-    // Calculate number of mip levels required to generate the probability map
-    while (
-      textureWidth < imageBitmap.width ||
-      textureHeight < imageBitmap.height
-    ) {
-      textureWidth *= 2;
-      textureHeight *= 2;
-      numMipLevels++;
-    }
-    texture = device.createTexture({
-      size: [imageBitmap.width, imageBitmap.height, 1],
-      mipLevelCount: numMipLevels,
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.STORAGE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-    device.queue.copyExternalImageToTexture(
-      { source: imageBitmap },
-      { texture: texture },
-      [imageBitmap.width, imageBitmap.height]
-    );
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Probability map generation
-  // The 0'th mip level of texture holds the color data and spawn-probability in
-  // the alpha channel. The mip levels 1..N are generated to hold spawn
-  // probabilities up to the top 1x1 mip level.
-  //////////////////////////////////////////////////////////////////////////////
-  {
-    const probabilityMapImportLevelPipeline = device.createComputePipeline({
-      layout: 'auto',
-      compute: {
-        module: device.createShaderModule({ code: probabilityMapWGSL }),
-        entryPoint: 'import_level',
-      },
-    });
-    const probabilityMapExportLevelPipeline = device.createComputePipeline({
-      layout: 'auto',
-      compute: {
-        module: device.createShaderModule({ code: probabilityMapWGSL }),
-        entryPoint: 'export_level',
-      },
-    });
-
-    const probabilityMapUBOBufferSize =
-      1 * 4 + // stride
-      3 * 4 + // padding
-      0;
-    const probabilityMapUBOBuffer = device.createBuffer({
-      size: probabilityMapUBOBufferSize,
-      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-    });
-    const buffer_a = device.createBuffer({
-      size: textureWidth * textureHeight * 4,
-      usage: GPUBufferUsage.STORAGE,
-    });
-    const buffer_b = device.createBuffer({
-      size: textureWidth * textureHeight * 4,
-      usage: GPUBufferUsage.STORAGE,
-    });
-    device.queue.writeBuffer(
-      probabilityMapUBOBuffer,
-      0,
-      new Int32Array([textureWidth])
-    );
-    const commandEncoder = device.createCommandEncoder();
-    for (let level = 0; level < numMipLevels; level++) {
-      const levelWidth = textureWidth >> level;
-      const levelHeight = textureHeight >> level;
-      const pipeline =
-        level == 0
-          ? probabilityMapImportLevelPipeline.getBindGroupLayout(0)
-          : probabilityMapExportLevelPipeline.getBindGroupLayout(0);
-      const probabilityMapBindGroup = device.createBindGroup({
-        layout: pipeline,
-        entries: [
-          {
-            // ubo
-            binding: 0,
-            resource: { buffer: probabilityMapUBOBuffer },
-          },
-          {
-            // buf_in
-            binding: 1,
-            resource: { buffer: level & 1 ? buffer_a : buffer_b },
-          },
-          {
-            // buf_out
-            binding: 2,
-            resource: { buffer: level & 1 ? buffer_b : buffer_a },
-          },
-          {
-            // tex_in / tex_out
-            binding: 3,
-            resource: texture.createView({
-              format: 'rgba8unorm',
-              dimension: '2d',
-              baseMipLevel: level,
-              mipLevelCount: 1,
-            }),
-          },
-        ],
-      });
-      if (level == 0) {
-        const passEncoder = commandEncoder.beginComputePass();
-        passEncoder.setPipeline(probabilityMapImportLevelPipeline);
-        passEncoder.setBindGroup(0, probabilityMapBindGroup);
-        passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);
-        passEncoder.end();
-      } else {
-        const passEncoder = commandEncoder.beginComputePass();
-        passEncoder.setPipeline(probabilityMapExportLevelPipeline);
-        passEncoder.setBindGroup(0, probabilityMapBindGroup);
-        passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);
-        passEncoder.end();
-      }
-    }
-    device.queue.submit([commandEncoder.finish()]);
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Simulation compute pipeline
-  //////////////////////////////////////////////////////////////////////////////
-  const simulationParams = {
-    simulate: true,
-    deltaTime: 0.04,
-  };
-
-  const simulationUBOBufferSize =
-    1 * 4 + // deltaTime
-    3 * 4 + // padding
-    4 * 4 + // seed
-    0;
-  const simulationUBOBuffer = device.createBuffer({
-    size: simulationUBOBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  Object.keys(simulationParams).forEach((k) => {
-    gui.add(simulationParams, k);
-  });
-
-  const computePipeline = device.createComputePipeline({
-    layout: 'auto',
-    compute: {
-      module: device.createShaderModule({
-        code: particleWGSL,
-      }),
-      entryPoint: 'simulate',
-    },
-  });
-  const computeBindGroup = device.createBindGroup({
-    layout: computePipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: simulationUBOBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: {
-          buffer: particlesBuffer,
-          offset: 0,
-          size: numParticles * particleInstanceByteSize,
-        },
-      },
-      {
-        binding: 2,
-        resource: texture.createView(),
-      },
-    ],
-  });
-
-  const aspect = canvas.width / canvas.height;
-  const projection = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 100.0);
-  const view = mat4.create();
-  const mvp = mat4.create();
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    device.queue.writeBuffer(
-      simulationUBOBuffer,
-      0,
-      new Float32Array([
-        simulationParams.simulate ? simulationParams.deltaTime : 0.0,
-        0.0,
-        0.0,
-        0.0, // padding
-        Math.random() * 100,
-        Math.random() * 100, // seed.xy
-        1 + Math.random(),
-        1 + Math.random(), // seed.zw
-      ])
-    );
-
-    mat4.identity(view);
-    mat4.translate(view, vec3.fromValues(0, 0, -3), view);
-    mat4.rotateX(view, Math.PI * -0.2, view);
-    mat4.multiply(projection, view, mvp);
-
-    // prettier-ignore
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      new Float32Array([
-        // modelViewProjectionMatrix
-        mvp[0], mvp[1], mvp[2], mvp[3],
-        mvp[4], mvp[5], mvp[6], mvp[7],
-        mvp[8], mvp[9], mvp[10], mvp[11],
-        mvp[12], mvp[13], mvp[14], mvp[15],
-
-        view[0], view[4], view[8], // right
-
-        0, // padding
-
-        view[1], view[5], view[9], // up
-
-        0, // padding
-      ])
-    );
-    const swapChainTexture = context.getCurrentTexture();
-    // prettier-ignore
-    renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    {
-      const passEncoder = commandEncoder.beginComputePass();
-      passEncoder.setPipeline(computePipeline);
-      passEncoder.setBindGroup(0, computeBindGroup);
-      passEncoder.dispatchWorkgroups(Math.ceil(numParticles / 64));
-      passEncoder.end();
-    }
-    {
-      const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-      passEncoder.setPipeline(renderPipeline);
-      passEncoder.setBindGroup(0, uniformBindGroup);
-      passEncoder.setVertexBuffer(0, particlesBuffer);
-      passEncoder.setVertexBuffer(1, quadVertexBuffer);
-      passEncoder.draw(6, numParticles, 0, 0);
-      passEncoder.end();
-    }
-
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const Particles: () => JSX.Element = () =>
-  makeSample({
-    name: 'Particles',
-    description:
-      'This example demonstrates rendering of particles simulated with compute shaders.',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './particle.wgsl',
-        contents: particleWGSL,
-        editable: true,
-      },
-      {
-        name: './probabilityMap.wgsl',
-        contents: probabilityMapWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default Particles;
diff --git a/src/sample/renderBundles/main.ts b/src/sample/renderBundles/main.ts
deleted file mode 100644
index bbe243d2..00000000
--- a/src/sample/renderBundles/main.ts
+++ /dev/null
@@ -1,451 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import { createSphereMesh, SphereLayout } from '../../meshes/sphere';
-
-import meshWGSL from './mesh.wgsl';
-
-interface Renderable {
-  vertices: GPUBuffer;
-  indices: GPUBuffer;
-  indexCount: number;
-  bindGroup?: GPUBindGroup;
-}
-
-const init: SampleInit = async ({ canvas, pageState, gui, stats }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-
-  const settings = {
-    useRenderBundles: true,
-    asteroidCount: 5000,
-  };
-  gui.add(settings, 'useRenderBundles');
-  gui.add(settings, 'asteroidCount', 1000, 10000, 1000).onChange(() => {
-    // If the content of the scene changes the render bundle must be recreated.
-    ensureEnoughAsteroids();
-    updateRenderBundle();
-  });
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const shaderModule = device.createShaderModule({
-    code: meshWGSL,
-  });
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: shaderModule,
-      entryPoint: 'vertexMain',
-      buffers: [
-        {
-          arrayStride: SphereLayout.vertexStride,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: SphereLayout.positionsOffset,
-              format: 'float32x3',
-            },
-            {
-              // normal
-              shaderLocation: 1,
-              offset: SphereLayout.normalOffset,
-              format: 'float32x3',
-            },
-            {
-              // uv
-              shaderLocation: 2,
-              offset: SphereLayout.uvOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: shaderModule,
-      entryPoint: 'fragmentMain',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the sphere is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  // Fetch the images and upload them into a GPUTexture.
-  let planetTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/saturn.jpg');
-    const imageBitmap = await createImageBitmap(await response.blob());
-
-    planetTexture = device.createTexture({
-      size: [imageBitmap.width, imageBitmap.height, 1],
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-    device.queue.copyExternalImageToTexture(
-      { source: imageBitmap },
-      { texture: planetTexture },
-      [imageBitmap.width, imageBitmap.height]
-    );
-  }
-
-  let moonTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/moon.jpg');
-    const imageBitmap = await createImageBitmap(await response.blob());
-
-    moonTexture = device.createTexture({
-      size: [imageBitmap.width, imageBitmap.height, 1],
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-    device.queue.copyExternalImageToTexture(
-      { source: imageBitmap },
-      { texture: moonTexture },
-      [imageBitmap.width, imageBitmap.height]
-    );
-  }
-
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  // Helper functions to create the required meshes and bind groups for each sphere.
-  function createSphereRenderable(
-    radius: number,
-    widthSegments = 32,
-    heightSegments = 16,
-    randomness = 0
-  ): Renderable {
-    const sphereMesh = createSphereMesh(
-      radius,
-      widthSegments,
-      heightSegments,
-      randomness
-    );
-
-    // Create a vertex buffer from the sphere data.
-    const vertices = device.createBuffer({
-      size: sphereMesh.vertices.byteLength,
-      usage: GPUBufferUsage.VERTEX,
-      mappedAtCreation: true,
-    });
-    new Float32Array(vertices.getMappedRange()).set(sphereMesh.vertices);
-    vertices.unmap();
-
-    const indices = device.createBuffer({
-      size: sphereMesh.indices.byteLength,
-      usage: GPUBufferUsage.INDEX,
-      mappedAtCreation: true,
-    });
-    new Uint16Array(indices.getMappedRange()).set(sphereMesh.indices);
-    indices.unmap();
-
-    return {
-      vertices,
-      indices,
-      indexCount: sphereMesh.indices.length,
-    };
-  }
-
-  function createSphereBindGroup(
-    texture: GPUTexture,
-    transform: Float32Array
-  ): GPUBindGroup {
-    const uniformBufferSize = 4 * 16; // 4x4 matrix
-    const uniformBuffer = device.createBuffer({
-      size: uniformBufferSize,
-      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-      mappedAtCreation: true,
-    });
-    new Float32Array(uniformBuffer.getMappedRange()).set(transform);
-    uniformBuffer.unmap();
-
-    const bindGroup = device.createBindGroup({
-      layout: pipeline.getBindGroupLayout(1),
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-          },
-        },
-        {
-          binding: 1,
-          resource: sampler,
-        },
-        {
-          binding: 2,
-          resource: texture.createView(),
-        },
-      ],
-    });
-
-    return bindGroup;
-  }
-
-  const transform = mat4.create() as Float32Array;
-  mat4.identity(transform);
-
-  // Create one large central planet surrounded by a large ring of asteroids
-  const planet = createSphereRenderable(1.0);
-  planet.bindGroup = createSphereBindGroup(planetTexture, transform);
-
-  const asteroids = [
-    createSphereRenderable(0.01, 8, 6, 0.15),
-    createSphereRenderable(0.013, 8, 6, 0.15),
-    createSphereRenderable(0.017, 8, 6, 0.15),
-    createSphereRenderable(0.02, 8, 6, 0.15),
-    createSphereRenderable(0.03, 16, 8, 0.15),
-  ];
-
-  const renderables = [planet];
-
-  function ensureEnoughAsteroids() {
-    for (let i = renderables.length; i <= settings.asteroidCount; ++i) {
-      // Place copies of the asteroid in a ring.
-      const radius = Math.random() * 1.7 + 1.25;
-      const angle = Math.random() * Math.PI * 2;
-      const x = Math.sin(angle) * radius;
-      const y = (Math.random() - 0.5) * 0.015;
-      const z = Math.cos(angle) * radius;
-
-      mat4.identity(transform);
-      mat4.translate(transform, [x, y, z], transform);
-      mat4.rotateX(transform, Math.random() * Math.PI, transform);
-      mat4.rotateY(transform, Math.random() * Math.PI, transform);
-      renderables.push({
-        ...asteroids[i % asteroids.length],
-        bindGroup: createSphereBindGroup(moonTexture, transform),
-      });
-    }
-  }
-  ensureEnoughAsteroids();
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-  const modelViewProjectionMatrix = mat4.create();
-
-  const frameBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-    ],
-  });
-
-  function getTransformationMatrix() {
-    const viewMatrix = mat4.identity();
-    mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);
-    const now = Date.now() / 1000;
-    // Tilt the view matrix so the planet looks like it's off-axis.
-    mat4.rotateZ(viewMatrix, Math.PI * 0.1, viewMatrix);
-    mat4.rotateX(viewMatrix, Math.PI * 0.1, viewMatrix);
-    // Rotate the view matrix slowly so the planet appears to spin.
-    mat4.rotateY(viewMatrix, now * 0.05, viewMatrix);
-
-    mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
-
-    return modelViewProjectionMatrix as Float32Array;
-  }
-
-  // Render bundles function as partial, limited render passes, so we can use the
-  // same code both to render the scene normally and to build the render bundle.
-  function renderScene(
-    passEncoder: GPURenderPassEncoder | GPURenderBundleEncoder
-  ) {
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, frameBindGroup);
-
-    // Loop through every renderable object and draw them individually.
-    // (Because many of these meshes are repeated, with only the transforms
-    // differing, instancing would be highly effective here. This sample
-    // intentionally avoids using instancing in order to emulate a more complex
-    // scene, which helps demonstrate the potential time savings a render bundle
-    // can provide.)
-    let count = 0;
-    for (const renderable of renderables) {
-      passEncoder.setBindGroup(1, renderable.bindGroup);
-      passEncoder.setVertexBuffer(0, renderable.vertices);
-      passEncoder.setIndexBuffer(renderable.indices, 'uint16');
-      passEncoder.drawIndexed(renderable.indexCount);
-
-      if (++count > settings.asteroidCount) {
-        break;
-      }
-    }
-  }
-
-  // The render bundle can be encoded once and re-used as many times as needed.
-  // Because it encodes all of the commands needed to render at the GPU level,
-  // those commands will not need to execute the associated JavaScript code upon
-  // execution or be re-validated, which can represent a significant time savings.
-  //
-  // However, because render bundles are immutable once created, they are only
-  // appropriate for rendering content where the same commands will be executed
-  // every time, with the only changes being the contents of the buffers and
-  // textures used. Cases where the executed commands differ from frame-to-frame,
-  // such as when using frustrum or occlusion culling, will not benefit from
-  // using render bundles as much.
-  let renderBundle;
-  function updateRenderBundle() {
-    const renderBundleEncoder = device.createRenderBundleEncoder({
-      colorFormats: [presentationFormat],
-      depthStencilFormat: 'depth24plus',
-    });
-    renderScene(renderBundleEncoder);
-    renderBundle = renderBundleEncoder.finish();
-  }
-  updateRenderBundle();
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    stats.begin();
-
-    const transformationMatrix = getTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      transformationMatrix.buffer,
-      transformationMatrix.byteOffset,
-      transformationMatrix.byteLength
-    );
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-
-    if (settings.useRenderBundles) {
-      // Executing a bundle is equivalent to calling all of the commands encoded
-      // in the render bundle as part of the current render pass.
-      passEncoder.executeBundles([renderBundle]);
-    } else {
-      // Alternatively, the same render commands can be encoded manually, which
-      // can take longer since each command needs to be interpreted by the
-      // JavaScript virtual machine and re-validated each time.
-      renderScene(passEncoder);
-    }
-
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    stats.end();
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const RenderBundles: () => JSX.Element = () =>
-  makeSample({
-    name: 'Render Bundles',
-    description: `This example shows how to use render bundles. It renders a large number of
-      meshes individually as a proxy for a more complex scene in order to demonstrate the reduction
-      in JavaScript time spent to issue render commands. (Typically a scene like this would make use
-      of instancing to reduce draw overhead.)`,
-    gui: true,
-    stats: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './mesh.wgsl',
-        contents: meshWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/sphere.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/sphere.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default RenderBundles;
diff --git a/src/sample/resizeCanvas/main.ts b/src/sample/resizeCanvas/main.ts
deleted file mode 100644
index ff09fa55..00000000
--- a/src/sample/resizeCanvas/main.ts
+++ /dev/null
@@ -1,155 +0,0 @@
-import { assert, makeSample, SampleInit } from '../../components/SampleLayout';
-
-import triangleVertWGSL from '../../shaders/triangle.vert.wgsl';
-import redFragWGSL from '../../shaders/red.frag.wgsl';
-
-import styles from './animatedCanvasSize.module.css';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  assert(adapter, 'requestAdapter returned null');
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const sampleCount = 4;
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: triangleVertWGSL,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: redFragWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-    multisample: {
-      count: 4,
-    },
-  });
-
-  let renderTarget: GPUTexture | undefined = undefined;
-  let renderTargetView: GPUTextureView;
-
-  canvas.classList.add(styles.animatedCanvasSize);
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const currentWidth = canvas.clientWidth * devicePixelRatio;
-    const currentHeight = canvas.clientHeight * devicePixelRatio;
-
-    // The canvas size is animating via CSS.
-    // When the size changes, we need to reallocate the render target.
-    // We also need to set the physical size of the canvas to match the computed CSS size.
-    if (
-      (currentWidth !== canvas.width || currentHeight !== canvas.height) &&
-      currentWidth &&
-      currentHeight
-    ) {
-      if (renderTarget !== undefined) {
-        // Destroy the previous render target
-        renderTarget.destroy();
-      }
-
-      // Setting the canvas width and height will automatically resize the textures returned
-      // when calling getCurrentTexture() on the context.
-      canvas.width = currentWidth;
-      canvas.height = currentHeight;
-
-      // Resize the multisampled render target to match the new canvas size.
-      renderTarget = device.createTexture({
-        size: [canvas.width, canvas.height],
-        sampleCount,
-        format: presentationFormat,
-        usage: GPUTextureUsage.RENDER_ATTACHMENT,
-      });
-
-      renderTargetView = renderTarget.createView();
-    }
-
-    const commandEncoder = device.createCommandEncoder();
-
-    const renderPassDescriptor: GPURenderPassDescriptor = {
-      colorAttachments: [
-        {
-          view: renderTargetView,
-          resolveTarget: context.getCurrentTexture().createView(),
-          clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 1.0 },
-          loadOp: 'clear',
-          storeOp: 'store',
-        },
-      ],
-    };
-
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.draw(3);
-    passEncoder.end();
-
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-const ResizeCanvas: () => JSX.Element = () =>
-  makeSample({
-    name: 'Resize Canvas',
-    description:
-      'Shows multisampled rendering a basic triangle on a dynamically sized canvas.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/triangle.vert.wgsl',
-        contents: triangleVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/red.frag.wgsl',
-        contents: redFragWGSL,
-        editable: true,
-      },
-      {
-        name: './animatedCanvasSize.module.css',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./animatedCanvasSize.module.css')
-          .default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ResizeCanvas;
diff --git a/src/sample/reversedZ/main.ts b/src/sample/reversedZ/main.ts
deleted file mode 100644
index dba7dff0..00000000
--- a/src/sample/reversedZ/main.ts
+++ /dev/null
@@ -1,756 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import { mat4, vec3 } from 'wgpu-matrix';
-
-import vertexWGSL from './vertex.wgsl';
-import fragmentWGSL from './fragment.wgsl';
-import vertexDepthPrePassWGSL from './vertexDepthPrePass.wgsl';
-import vertexTextureQuadWGSL from './vertexTextureQuad.wgsl';
-import fragmentTextureQuadWGSL from './fragmentTextureQuad.wgsl';
-import vertexPrecisionErrorPassWGSL from './vertexPrecisionErrorPass.wgsl';
-import fragmentPrecisionErrorPassWGSL from './fragmentPrecisionErrorPass.wgsl';
-
-// Two planes close to each other for depth precision test
-const geometryVertexSize = 4 * 8; // Byte size of one geometry vertex.
-const geometryPositionOffset = 0;
-const geometryColorOffset = 4 * 4; // Byte offset of geometry vertex color attribute.
-const geometryDrawCount = 6 * 2;
-
-const d = 0.0001; // half distance between two planes
-const o = 0.5; // half x offset to shift planes so they are only partially overlaping
-
-// prettier-ignore
-export const geometryVertexArray = new Float32Array([
-  // float4 position, float4 color
-  -1 - o, -1, d, 1, 1, 0, 0, 1,
-  1 - o, -1, d, 1, 1, 0, 0, 1,
-  -1 - o, 1, d, 1, 1, 0, 0, 1,
-  1 - o, -1, d, 1, 1, 0, 0, 1,
-  1 - o, 1, d, 1, 1, 0, 0, 1,
-  -1 - o, 1, d, 1, 1, 0, 0, 1,
-
-  -1 + o, -1, -d, 1, 0, 1, 0, 1,
-  1 + o, -1, -d, 1, 0, 1, 0, 1,
-  -1 + o, 1, -d, 1, 0, 1, 0, 1,
-  1 + o, -1, -d, 1, 0, 1, 0, 1,
-  1 + o, 1, -d, 1, 0, 1, 0, 1,
-  -1 + o, 1, -d, 1, 0, 1, 0, 1,
-]);
-
-const xCount = 1;
-const yCount = 5;
-const numInstances = xCount * yCount;
-const matrixFloatCount = 16; // 4x4 matrix
-const matrixStride = 4 * matrixFloatCount; // 64;
-
-const depthRangeRemapMatrix = mat4.identity();
-depthRangeRemapMatrix[10] = -1;
-depthRangeRemapMatrix[14] = 1;
-
-enum DepthBufferMode {
-  Default = 0,
-  Reversed,
-}
-
-const depthBufferModes: DepthBufferMode[] = [
-  DepthBufferMode.Default,
-  DepthBufferMode.Reversed,
-];
-const depthCompareFuncs = {
-  [DepthBufferMode.Default]: 'less' as GPUCompareFunction,
-  [DepthBufferMode.Reversed]: 'greater' as GPUCompareFunction,
-};
-const depthClearValues = {
-  [DepthBufferMode.Default]: 1.0,
-  [DepthBufferMode.Reversed]: 0.0,
-};
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const verticesBuffer = device.createBuffer({
-    size: geometryVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(geometryVertexArray);
-  verticesBuffer.unmap();
-
-  const depthBufferFormat = 'depth32float';
-
-  const depthTextureBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.FRAGMENT,
-        texture: {
-          sampleType: 'depth',
-        },
-      },
-    ],
-  });
-
-  // Model, view, projection matrices
-  const uniformBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-    ],
-  });
-
-  const depthPrePassRenderPipelineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [uniformBindGroupLayout],
-  });
-
-  // depthPrePass is used to render scene to the depth texture
-  // this is not needed if you just want to use reversed z to render a scene
-  const depthPrePassRenderPipelineDescriptorBase = {
-    layout: depthPrePassRenderPipelineLayout,
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexDepthPrePassWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: geometryVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: geometryPositionOffset,
-              format: 'float32x4',
-            },
-          ],
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-      cullMode: 'back',
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: depthBufferFormat,
-    },
-  } as GPURenderPipelineDescriptor;
-
-  // we need the depthCompare to fit the depth buffer mode we are using.
-  // this is the same for other passes
-  const depthPrePassPipelines: GPURenderPipeline[] = [];
-  depthPrePassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Default];
-  depthPrePassPipelines[DepthBufferMode.Default] = device.createRenderPipeline(
-    depthPrePassRenderPipelineDescriptorBase
-  );
-  depthPrePassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Reversed];
-  depthPrePassPipelines[DepthBufferMode.Reversed] = device.createRenderPipeline(
-    depthPrePassRenderPipelineDescriptorBase
-  );
-
-  // precisionPass is to draw precision error as color of depth value stored in depth buffer
-  // compared to that directly calcualated in the shader
-  const precisionPassRenderPipelineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [uniformBindGroupLayout, depthTextureBindGroupLayout],
-  });
-  const precisionPassRenderPipelineDescriptorBase = {
-    layout: precisionPassRenderPipelineLayout,
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexPrecisionErrorPassWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: geometryVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: geometryPositionOffset,
-              format: 'float32x4',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentPrecisionErrorPassWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-      cullMode: 'back',
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: depthBufferFormat,
-    },
-  } as GPURenderPipelineDescriptor;
-  const precisionPassPipelines: GPURenderPipeline[] = [];
-  precisionPassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Default];
-  precisionPassPipelines[DepthBufferMode.Default] = device.createRenderPipeline(
-    precisionPassRenderPipelineDescriptorBase
-  );
-  precisionPassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Reversed];
-  // prettier-ignore
-  precisionPassPipelines[DepthBufferMode.Reversed] = device.createRenderPipeline(
-    precisionPassRenderPipelineDescriptorBase
-  );
-
-  // colorPass is the regular render pass to render the scene
-  const colorPassRenderPiplineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [uniformBindGroupLayout],
-  });
-  const colorPassRenderPipelineDescriptorBase: GPURenderPipelineDescriptor = {
-    layout: colorPassRenderPiplineLayout,
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: geometryVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: geometryPositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // color
-              shaderLocation: 1,
-              offset: geometryColorOffset,
-              format: 'float32x4',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-      cullMode: 'back',
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: depthBufferFormat,
-    },
-  };
-  const colorPassPipelines: GPURenderPipeline[] = [];
-  colorPassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Default];
-  colorPassPipelines[DepthBufferMode.Default] = device.createRenderPipeline(
-    colorPassRenderPipelineDescriptorBase
-  );
-  colorPassRenderPipelineDescriptorBase.depthStencil.depthCompare =
-    depthCompareFuncs[DepthBufferMode.Reversed];
-  colorPassPipelines[DepthBufferMode.Reversed] = device.createRenderPipeline(
-    colorPassRenderPipelineDescriptorBase
-  );
-
-  // textureQuadPass is draw a full screen quad of depth texture
-  // to see the difference of depth value using reversed z compared to default depth buffer usage
-  // 0.0 will be the furthest and 1.0 will be the closest
-  const textureQuadPassPiplineLayout = device.createPipelineLayout({
-    bindGroupLayouts: [depthTextureBindGroupLayout],
-  });
-  const textureQuadPassPipline = device.createRenderPipeline({
-    layout: textureQuadPassPiplineLayout,
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexTextureQuadWGSL,
-      }),
-      entryPoint: 'main',
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentTextureQuadWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: depthBufferFormat,
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-  });
-  const depthTextureView = depthTexture.createView();
-
-  const defaultDepthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: depthBufferFormat,
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-  const defaultDepthTextureView = defaultDepthTexture.createView();
-
-  const depthPrePassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [],
-    depthStencilAttachment: {
-      view: depthTextureView,
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  // drawPassDescriptor and drawPassLoadDescriptor are used for drawing
-  // the scene twice using different depth buffer mode on splitted viewport
-  // of the same canvas
-  // see the difference of the loadOp of the colorAttachments
-  const drawPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // view is acquired and set in render loop.
-        view: undefined,
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: defaultDepthTextureView,
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-  const drawPassLoadDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // attachment is acquired and set in render loop.
-        view: undefined,
-
-        loadOp: 'load',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: defaultDepthTextureView,
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-  const drawPassDescriptors = [drawPassDescriptor, drawPassLoadDescriptor];
-
-  const textureQuadPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // view is acquired and set in render loop.
-        view: undefined,
-
-        clearValue: { r: 0.0, g: 0.0, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-  };
-  const textureQuadPassLoadDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // view is acquired and set in render loop.
-        view: undefined,
-
-        loadOp: 'load',
-        storeOp: 'store',
-      },
-    ],
-  };
-  const textureQuadPassDescriptors = [
-    textureQuadPassDescriptor,
-    textureQuadPassLoadDescriptor,
-  ];
-
-  const depthTextureBindGroup = device.createBindGroup({
-    layout: depthTextureBindGroupLayout,
-    entries: [
-      {
-        binding: 0,
-        resource: depthTextureView,
-      },
-    ],
-  });
-
-  const uniformBufferSize = numInstances * matrixStride;
-
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-  const cameraMatrixBuffer = device.createBuffer({
-    size: 4 * 16, // 4x4 matrix
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-  const cameraMatrixReversedDepthBuffer = device.createBuffer({
-    size: 4 * 16, // 4x4 matrix
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const uniformBindGroups = [
-    device.createBindGroup({
-      layout: uniformBindGroupLayout,
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: cameraMatrixBuffer,
-          },
-        },
-      ],
-    }),
-    device.createBindGroup({
-      layout: uniformBindGroupLayout,
-      entries: [
-        {
-          binding: 0,
-          resource: {
-            buffer: uniformBuffer,
-          },
-        },
-        {
-          binding: 1,
-          resource: {
-            buffer: cameraMatrixReversedDepthBuffer,
-          },
-        },
-      ],
-    }),
-  ];
-
-  type Mat4 = mat4.default;
-  const modelMatrices = new Array(numInstances);
-  const mvpMatricesData = new Float32Array(matrixFloatCount * numInstances);
-
-  let m = 0;
-  for (let x = 0; x < xCount; x++) {
-    for (let y = 0; y < yCount; y++) {
-      const z = -800 * m;
-      const s = 1 + 50 * m;
-
-      modelMatrices[m] = mat4.translation(
-        vec3.fromValues(
-          x - xCount / 2 + 0.5,
-          (4.0 - 0.2 * z) * (y - yCount / 2 + 1.0),
-          z
-        )
-      );
-      mat4.scale(modelMatrices[m], vec3.fromValues(s, s, s), modelMatrices[m]);
-
-      m++;
-    }
-  }
-
-  const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -12));
-
-  const aspect = (0.5 * canvas.width) / canvas.height;
-  // wgpu-matrix perspective doesn't handle zFar === Infinity now.
-  // https://github.com/greggman/wgpu-matrix/issues/9
-  const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 5, 9999);
-
-  const viewProjectionMatrix = mat4.multiply(projectionMatrix, viewMatrix);
-  // to use 1/z we just multiple depthRangeRemapMatrix to our default camera view projection matrix
-  const reversedRangeViewProjectionMatrix = mat4.multiply(
-    depthRangeRemapMatrix,
-    viewProjectionMatrix
-  );
-
-  let bufferData = viewProjectionMatrix as Float32Array;
-  device.queue.writeBuffer(
-    cameraMatrixBuffer,
-    0,
-    bufferData.buffer,
-    bufferData.byteOffset,
-    bufferData.byteLength
-  );
-  bufferData = reversedRangeViewProjectionMatrix as Float32Array;
-  device.queue.writeBuffer(
-    cameraMatrixReversedDepthBuffer,
-    0,
-    bufferData.buffer,
-    bufferData.byteOffset,
-    bufferData.byteLength
-  );
-
-  const tmpMat4 = mat4.create();
-  function updateTransformationMatrix() {
-    const now = Date.now() / 1000;
-
-    for (let i = 0, m = 0; i < numInstances; i++, m += matrixFloatCount) {
-      mat4.rotate(
-        modelMatrices[i],
-        vec3.fromValues(Math.sin(now), Math.cos(now), 0),
-        (Math.PI / 180) * 30,
-        tmpMat4
-      );
-      mvpMatricesData.set(tmpMat4, m);
-    }
-  }
-
-  const settings = {
-    mode: 'color',
-  };
-  gui.add(settings, 'mode', ['color', 'precision-error', 'depth-texture']);
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    updateTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      mvpMatricesData.buffer,
-      mvpMatricesData.byteOffset,
-      mvpMatricesData.byteLength
-    );
-
-    const attachment = context.getCurrentTexture().createView();
-    const commandEncoder = device.createCommandEncoder();
-    if (settings.mode === 'color') {
-      for (const m of depthBufferModes) {
-        drawPassDescriptors[m].colorAttachments[0].view = attachment;
-        drawPassDescriptors[m].depthStencilAttachment.depthClearValue =
-          depthClearValues[m];
-        const colorPass = commandEncoder.beginRenderPass(
-          drawPassDescriptors[m]
-        );
-        colorPass.setPipeline(colorPassPipelines[m]);
-        colorPass.setBindGroup(0, uniformBindGroups[m]);
-        colorPass.setVertexBuffer(0, verticesBuffer);
-        colorPass.setViewport(
-          (canvas.width * m) / 2,
-          0,
-          canvas.width / 2,
-          canvas.height,
-          0,
-          1
-        );
-        colorPass.draw(geometryDrawCount, numInstances, 0, 0);
-        colorPass.end();
-      }
-    } else if (settings.mode === 'precision-error') {
-      for (const m of depthBufferModes) {
-        {
-          depthPrePassDescriptor.depthStencilAttachment.depthClearValue =
-            depthClearValues[m];
-          const depthPrePass = commandEncoder.beginRenderPass(
-            depthPrePassDescriptor
-          );
-          depthPrePass.setPipeline(depthPrePassPipelines[m]);
-          depthPrePass.setBindGroup(0, uniformBindGroups[m]);
-          depthPrePass.setVertexBuffer(0, verticesBuffer);
-          depthPrePass.setViewport(
-            (canvas.width * m) / 2,
-            0,
-            canvas.width / 2,
-            canvas.height,
-            0,
-            1
-          );
-          depthPrePass.draw(geometryDrawCount, numInstances, 0, 0);
-          depthPrePass.end();
-        }
-        {
-          drawPassDescriptors[m].colorAttachments[0].view = attachment;
-          drawPassDescriptors[m].depthStencilAttachment.depthClearValue =
-            depthClearValues[m];
-          const precisionErrorPass = commandEncoder.beginRenderPass(
-            drawPassDescriptors[m]
-          );
-          precisionErrorPass.setPipeline(precisionPassPipelines[m]);
-          precisionErrorPass.setBindGroup(0, uniformBindGroups[m]);
-          precisionErrorPass.setBindGroup(1, depthTextureBindGroup);
-          precisionErrorPass.setVertexBuffer(0, verticesBuffer);
-          precisionErrorPass.setViewport(
-            (canvas.width * m) / 2,
-            0,
-            canvas.width / 2,
-            canvas.height,
-            0,
-            1
-          );
-          precisionErrorPass.draw(geometryDrawCount, numInstances, 0, 0);
-          precisionErrorPass.end();
-        }
-      }
-    } else {
-      // depth texture quad
-      for (const m of depthBufferModes) {
-        {
-          depthPrePassDescriptor.depthStencilAttachment.depthClearValue =
-            depthClearValues[m];
-          const depthPrePass = commandEncoder.beginRenderPass(
-            depthPrePassDescriptor
-          );
-          depthPrePass.setPipeline(depthPrePassPipelines[m]);
-          depthPrePass.setBindGroup(0, uniformBindGroups[m]);
-          depthPrePass.setVertexBuffer(0, verticesBuffer);
-          depthPrePass.setViewport(
-            (canvas.width * m) / 2,
-            0,
-            canvas.width / 2,
-            canvas.height,
-            0,
-            1
-          );
-          depthPrePass.draw(geometryDrawCount, numInstances, 0, 0);
-          depthPrePass.end();
-        }
-        {
-          textureQuadPassDescriptors[m].colorAttachments[0].view = attachment;
-          const depthTextureQuadPass = commandEncoder.beginRenderPass(
-            textureQuadPassDescriptors[m]
-          );
-          depthTextureQuadPass.setPipeline(textureQuadPassPipline);
-          depthTextureQuadPass.setBindGroup(0, depthTextureBindGroup);
-          depthTextureQuadPass.setViewport(
-            (canvas.width * m) / 2,
-            0,
-            canvas.width / 2,
-            canvas.height,
-            0,
-            1
-          );
-          depthTextureQuadPass.draw(6);
-          depthTextureQuadPass.end();
-        }
-      }
-    }
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const ReversedZ: () => JSX.Element = () =>
-  makeSample({
-    name: 'Reversed Z',
-    description: `This example shows the use of reversed z technique for better utilization of depth buffer precision.
-      The left column uses regular method, while the right one uses reversed z technique.
-      Both are using depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other.
-      Higher sets are placed further from camera (and are scaled for better visual purpose).
-      To use reversed z to render your scene, you will need depth store value to be 0.0, depth compare function to be greater,
-      and remap depth range by multiplying an additional matrix to your projection matrix.
-      Related reading:
-      https://developer.nvidia.com/content/depth-precision-visualized
-      https://web.archive.org/web/20220724174000/https://thxforthefish.com/posts/reverse_z/
-      `,
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './vertex.wgsl',
-        contents: vertexWGSL,
-        editable: true,
-      },
-      {
-        name: './fragment.wgsl',
-        contents: fragmentWGSL,
-        editable: true,
-      },
-      {
-        name: './vertexDepthPrePass.wgsl',
-        contents: vertexDepthPrePassWGSL,
-        editable: true,
-      },
-      {
-        name: './vertexTextureQuad.wgsl',
-        contents: vertexTextureQuadWGSL,
-        editable: true,
-      },
-      {
-        name: './fragmentTextureQuad.wgsl',
-        contents: fragmentTextureQuadWGSL,
-        editable: true,
-      },
-      {
-        name: './vertexPrecisionErrorPass.wgsl',
-        contents: vertexPrecisionErrorPassWGSL,
-        editable: true,
-      },
-      {
-        name: './fragmentPrecisionErrorPass.wgsl',
-        contents: fragmentPrecisionErrorPassWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ReversedZ;
diff --git a/src/sample/rotatingCube/main.ts b/src/sample/rotatingCube/main.ts
deleted file mode 100644
index fa92f556..00000000
--- a/src/sample/rotatingCube/main.ts
+++ /dev/null
@@ -1,226 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import basicVertWGSL from '../../shaders/basic.vert.wgsl';
-import vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: basicVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: vertexPositionColorWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the cube is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-  const modelViewProjectionMatrix = mat4.create();
-
-  function getTransformationMatrix() {
-    const viewMatrix = mat4.identity();
-    mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);
-    const now = Date.now() / 1000;
-    mat4.rotate(
-      viewMatrix,
-      vec3.fromValues(Math.sin(now), Math.cos(now), 0),
-      1,
-      viewMatrix
-    );
-
-    mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
-
-    return modelViewProjectionMatrix as Float32Array;
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const transformationMatrix = getTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      transformationMatrix.buffer,
-      transformationMatrix.byteOffset,
-      transformationMatrix.byteLength
-    );
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.draw(cubeVertexCount);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const RotatingCube: () => JSX.Element = () =>
-  makeSample({
-    name: 'Rotating Cube',
-    description:
-      'This example shows how to upload uniform data every frame to render a rotating object.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        contents: basicVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/vertexPositionColor.frag.wgsl',
-        contents: vertexPositionColorWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default RotatingCube;
diff --git a/src/sample/samplerParameters/main.ts b/src/sample/samplerParameters/main.ts
deleted file mode 100644
index ce05fc7c..00000000
--- a/src/sample/samplerParameters/main.ts
+++ /dev/null
@@ -1,389 +0,0 @@
-import { mat4 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import texturedSquareWGSL from './texturedSquare.wgsl';
-import showTextureWGSL from './showTexture.wgsl';
-
-const kMatrices: Readonly = new Float32Array([
-  // Row 1: Scale by 2
-  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [2, 2, 1]),
-  ...mat4.scale(mat4.identity(), [2, 2, 1]),
-  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [2, 2, 1]),
-  ...mat4.scale(mat4.rotationX(-Math.PI * 0.42), [2, 2, 1]),
-  // Row 2: Scale by 1
-  ...mat4.rotationZ(Math.PI / 16),
-  ...mat4.identity(),
-  ...mat4.rotationX(-Math.PI * 0.3),
-  ...mat4.rotationX(-Math.PI * 0.42),
-  // Row 3: Scale by 0.9
-  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [0.9, 0.9, 1]),
-  ...mat4.scale(mat4.identity(), [0.9, 0.9, 1]),
-  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [0.9, 0.9, 1]),
-  ...mat4.scale(mat4.rotationX(-Math.PI * 0.42), [0.9, 0.9, 1]),
-  // Row 4: Scale by 0.3
-  ...mat4.scale(mat4.rotationZ(Math.PI / 16), [0.3, 0.3, 1]),
-  ...mat4.scale(mat4.identity(), [0.3, 0.3, 1]),
-  ...mat4.scale(mat4.rotationX(-Math.PI * 0.3), [0.3, 0.3, 1]),
-]);
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-
-  //
-  // GUI controls
-  //
-
-  const kInitConfig = {
-    flangeLogSize: 1.0,
-    highlightFlange: false,
-    animation: 0.1,
-  } as const;
-  const config = { ...kInitConfig };
-  const updateConfigBuffer = () => {
-    const t = (performance.now() / 1000) * 0.5;
-    const data = new Float32Array([
-      Math.cos(t) * config.animation,
-      Math.sin(t) * config.animation,
-      (2 ** config.flangeLogSize - 1) / 2,
-      Number(config.highlightFlange),
-    ]);
-    device.queue.writeBuffer(bufConfig, 64, data);
-  };
-
-  const kInitSamplerDescriptor = {
-    addressModeU: 'clamp-to-edge',
-    addressModeV: 'clamp-to-edge',
-    magFilter: 'linear',
-    minFilter: 'linear',
-    mipmapFilter: 'linear',
-    lodMinClamp: 0,
-    lodMaxClamp: 4,
-    maxAnisotropy: 1,
-  } as const;
-  const samplerDescriptor: GPUSamplerDescriptor = { ...kInitSamplerDescriptor };
-
-  {
-    const buttons = {
-      initial() {
-        Object.assign(config, kInitConfig);
-        Object.assign(samplerDescriptor, kInitSamplerDescriptor);
-        gui.updateDisplay();
-      },
-      checkerboard() {
-        Object.assign(config, { flangeLogSize: 10 });
-        Object.assign(samplerDescriptor, {
-          addressModeU: 'repeat',
-          addressModeV: 'repeat',
-        });
-        gui.updateDisplay();
-      },
-      smooth() {
-        Object.assign(samplerDescriptor, {
-          magFilter: 'linear',
-          minFilter: 'linear',
-          mipmapFilter: 'linear',
-        });
-        gui.updateDisplay();
-      },
-      crunchy() {
-        Object.assign(samplerDescriptor, {
-          magFilter: 'nearest',
-          minFilter: 'nearest',
-          mipmapFilter: 'nearest',
-        });
-        gui.updateDisplay();
-      },
-    };
-    const presets = gui.addFolder('Presets');
-    presets.open();
-    presets.add(buttons, 'initial').name('reset to initial');
-    presets.add(buttons, 'checkerboard').name('checkered floor');
-    presets.add(buttons, 'smooth').name('smooth (linear)');
-    presets.add(buttons, 'crunchy').name('crunchy (nearest)');
-
-    const flangeFold = gui.addFolder('Plane settings');
-    flangeFold.open();
-    flangeFold.add(config, 'flangeLogSize', 0, 10.0, 0.1).name('size = 2**');
-    flangeFold.add(config, 'highlightFlange');
-    flangeFold.add(config, 'animation', 0, 0.5);
-
-    gui.width = 280;
-    {
-      const folder = gui.addFolder('GPUSamplerDescriptor');
-      folder.open();
-
-      const kAddressModes = ['clamp-to-edge', 'repeat', 'mirror-repeat'];
-      folder.add(samplerDescriptor, 'addressModeU', kAddressModes);
-      folder.add(samplerDescriptor, 'addressModeV', kAddressModes);
-
-      const kFilterModes = ['nearest', 'linear'];
-      folder.add(samplerDescriptor, 'magFilter', kFilterModes);
-      folder.add(samplerDescriptor, 'minFilter', kFilterModes);
-      const kMipmapFilterModes = ['nearest', 'linear'] as const;
-      folder.add(samplerDescriptor, 'mipmapFilter', kMipmapFilterModes);
-
-      const ctlMin = folder.add(samplerDescriptor, 'lodMinClamp', 0, 4, 0.1);
-      const ctlMax = folder.add(samplerDescriptor, 'lodMaxClamp', 0, 4, 0.1);
-      ctlMin.onChange((value: number) => {
-        if (samplerDescriptor.lodMaxClamp < value) ctlMax.setValue(value);
-      });
-      ctlMax.onChange((value: number) => {
-        if (samplerDescriptor.lodMinClamp > value) ctlMin.setValue(value);
-      });
-
-      {
-        const folder2 = folder.addFolder(
-          'maxAnisotropy (set only if all "linear")'
-        );
-        folder2.open();
-        const kMaxAnisotropy = 16;
-        folder2.add(samplerDescriptor, 'maxAnisotropy', 1, kMaxAnisotropy, 1);
-      }
-    }
-  }
-
-  //
-  // Canvas setup
-  //
-
-  // Low-res, pixelated render target so it's easier to see fine details.
-  const kCanvasSize = 200;
-  const kViewportGridSize = 4;
-  const kViewportGridStride = Math.floor(kCanvasSize / kViewportGridSize);
-  const kViewportSize = kViewportGridStride - 2;
-
-  // The canvas buffer size is 200x200.
-  // Compute a canvas CSS size such that there's an integer number of device
-  // pixels per canvas pixel ("integer" or "pixel-perfect" scaling).
-  // Note the result may be 1 pixel off since ResizeObserver is not used.
-  const kCanvasLayoutCSSSize = 600; // set by template styles
-  const kCanvasLayoutDevicePixels = kCanvasLayoutCSSSize * devicePixelRatio;
-  const kScaleFactor = Math.floor(kCanvasLayoutDevicePixels / kCanvasSize);
-  const kCanvasDevicePixels = kScaleFactor * kCanvasSize;
-  const kCanvasCSSSize = kCanvasDevicePixels / devicePixelRatio;
-  canvas.style.imageRendering = 'pixelated';
-  canvas.width = canvas.height = kCanvasSize;
-  canvas.style.minWidth = canvas.style.maxWidth = kCanvasCSSSize + 'px';
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  //
-  // Initialize test texture
-  //
-
-  // Set up a texture with 4 mip levels, each containing a differently-colored
-  // checkerboard with 1x1 pixels (so when rendered the checkerboards are
-  // different sizes). This is different from a normal mipmap where each level
-  // would look like a lower-resolution version of the previous one.
-  // Level 0 is 16x16 white/black
-  // Level 1 is 8x8 blue/black
-  // Level 2 is 4x4 yellow/black
-  // Level 3 is 2x2 pink/black
-  const kTextureMipLevels = 4;
-  const kTextureBaseSize = 16;
-  const checkerboard = device.createTexture({
-    format: 'rgba8unorm',
-    usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
-    size: [kTextureBaseSize, kTextureBaseSize],
-    mipLevelCount: 4,
-  });
-  const checkerboardView = checkerboard.createView();
-
-  const kColorForLevel = [
-    [255, 255, 255, 255],
-    [30, 136, 229, 255], // blue
-    [255, 193, 7, 255], // yellow
-    [216, 27, 96, 255], // pink
-  ];
-  for (let mipLevel = 0; mipLevel < kTextureMipLevels; ++mipLevel) {
-    const size = 2 ** (kTextureMipLevels - mipLevel); // 16, 8, 4, 2
-    const data = new Uint8Array(size * size * 4);
-    for (let y = 0; y < size; ++y) {
-      for (let x = 0; x < size; ++x) {
-        data.set(
-          (x + y) % 2 ? kColorForLevel[mipLevel] : [0, 0, 0, 255],
-          (y * size + x) * 4
-        );
-      }
-    }
-    device.queue.writeTexture(
-      { texture: checkerboard, mipLevel },
-      data,
-      { bytesPerRow: size * 4 },
-      [size, size]
-    );
-  }
-
-  //
-  // "Debug" view of the actual texture contents
-  //
-
-  const showTextureModule = device.createShaderModule({
-    code: showTextureWGSL,
-  });
-  const showTexturePipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: { module: showTextureModule, entryPoint: 'vmain' },
-    fragment: {
-      module: showTextureModule,
-      entryPoint: 'fmain',
-      targets: [{ format: presentationFormat }],
-    },
-    primitive: { topology: 'triangle-list' },
-  });
-
-  const showTextureBG = device.createBindGroup({
-    layout: showTexturePipeline.getBindGroupLayout(0),
-    entries: [{ binding: 0, resource: checkerboardView }],
-  });
-
-  //
-  // Pipeline for drawing the test squares
-  //
-
-  const texturedSquareModule = device.createShaderModule({
-    code: texturedSquareWGSL,
-  });
-
-  const texturedSquarePipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: texturedSquareModule,
-      entryPoint: 'vmain',
-      constants: { kTextureBaseSize, kViewportSize },
-    },
-    fragment: {
-      module: texturedSquareModule,
-      entryPoint: 'fmain',
-      targets: [{ format: presentationFormat }],
-    },
-    primitive: { topology: 'triangle-list' },
-  });
-  const texturedSquareBGL = texturedSquarePipeline.getBindGroupLayout(0);
-
-  const bufConfig = device.createBuffer({
-    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
-    size: 128,
-  });
-  // View-projection matrix set up so it doesn't transform anything at z=0.
-  const kCameraDist = 3;
-  const viewProj = mat4.translate(
-    mat4.perspective(2 * Math.atan(1 / kCameraDist), 1, 0.1, 100),
-    [0, 0, -kCameraDist]
-  );
-  device.queue.writeBuffer(bufConfig, 0, viewProj as Float32Array);
-
-  const bufMatrices = device.createBuffer({
-    usage: GPUBufferUsage.STORAGE,
-    size: kMatrices.byteLength,
-    mappedAtCreation: true,
-  });
-  new Float32Array(bufMatrices.getMappedRange()).set(kMatrices);
-  bufMatrices.unmap();
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    updateConfigBuffer();
-
-    const sampler = device.createSampler({
-      ...samplerDescriptor,
-      maxAnisotropy:
-        samplerDescriptor.minFilter === 'linear' &&
-        samplerDescriptor.magFilter === 'linear' &&
-        samplerDescriptor.mipmapFilter === 'linear'
-          ? samplerDescriptor.maxAnisotropy
-          : 1,
-    });
-
-    const bindGroup = device.createBindGroup({
-      layout: texturedSquareBGL,
-      entries: [
-        { binding: 0, resource: { buffer: bufConfig } },
-        { binding: 1, resource: { buffer: bufMatrices } },
-        { binding: 2, resource: sampler },
-        { binding: 3, resource: checkerboardView },
-      ],
-    });
-
-    const textureView = context.getCurrentTexture().createView();
-
-    const commandEncoder = device.createCommandEncoder();
-
-    const renderPassDescriptor: GPURenderPassDescriptor = {
-      colorAttachments: [
-        {
-          view: textureView,
-          clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 1.0 },
-          loadOp: 'clear',
-          storeOp: 'store',
-        },
-      ],
-    };
-
-    const pass = commandEncoder.beginRenderPass(renderPassDescriptor);
-    // Draw test squares
-    pass.setPipeline(texturedSquarePipeline);
-    pass.setBindGroup(0, bindGroup);
-    for (let i = 0; i < kViewportGridSize ** 2 - 1; ++i) {
-      const vpX = kViewportGridStride * (i % kViewportGridSize) + 1;
-      const vpY = kViewportGridStride * Math.floor(i / kViewportGridSize) + 1;
-      pass.setViewport(vpX, vpY, kViewportSize, kViewportSize, 0, 1);
-      pass.draw(6, 1, 0, i);
-    }
-    // Show texture contents
-    pass.setPipeline(showTexturePipeline);
-    pass.setBindGroup(0, showTextureBG);
-    const kLastViewport = (kViewportGridSize - 1) * kViewportGridStride + 1;
-    pass.setViewport(kLastViewport, kLastViewport, 32, 32, 0, 1);
-    pass.draw(6, 1, 0, 0);
-    pass.setViewport(kLastViewport + 32, kLastViewport, 16, 16, 0, 1);
-    pass.draw(6, 1, 0, 1);
-    pass.setViewport(kLastViewport + 32, kLastViewport + 16, 8, 8, 0, 1);
-    pass.draw(6, 1, 0, 2);
-    pass.setViewport(kLastViewport + 32, kLastViewport + 24, 4, 4, 0, 1);
-    pass.draw(6, 1, 0, 3);
-    pass.end();
-
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-
-  requestAnimationFrame(frame);
-};
-
-export default () =>
-  makeSample({
-    name: 'Sampler Parameters',
-    description:
-      'Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).',
-    gui: true,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './texturedSquare.wgsl',
-        contents: texturedSquareWGSL,
-        editable: true,
-      },
-      {
-        name: './showTexture.wgsl',
-        contents: showTextureWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
diff --git a/src/sample/shadowMapping/main.ts b/src/sample/shadowMapping/main.ts
deleted file mode 100644
index 9322dff6..00000000
--- a/src/sample/shadowMapping/main.ts
+++ /dev/null
@@ -1,456 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import { mesh } from '../../meshes/stanfordDragon';
-
-import vertexShadowWGSL from './vertexShadow.wgsl';
-import vertexWGSL from './vertex.wgsl';
-import fragmentWGSL from './fragment.wgsl';
-
-const shadowDepthTextureSize = 1024;
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const aspect = canvas.width / canvas.height;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create the model vertex buffer.
-  const vertexBuffer = device.createBuffer({
-    size: mesh.positions.length * 3 * 2 * Float32Array.BYTES_PER_ELEMENT,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  {
-    const mapping = new Float32Array(vertexBuffer.getMappedRange());
-    for (let i = 0; i < mesh.positions.length; ++i) {
-      mapping.set(mesh.positions[i], 6 * i);
-      mapping.set(mesh.normals[i], 6 * i + 3);
-    }
-    vertexBuffer.unmap();
-  }
-
-  // Create the model index buffer.
-  const indexCount = mesh.triangles.length * 3;
-  const indexBuffer = device.createBuffer({
-    size: indexCount * Uint16Array.BYTES_PER_ELEMENT,
-    usage: GPUBufferUsage.INDEX,
-    mappedAtCreation: true,
-  });
-  {
-    const mapping = new Uint16Array(indexBuffer.getMappedRange());
-    for (let i = 0; i < mesh.triangles.length; ++i) {
-      mapping.set(mesh.triangles[i], 3 * i);
-    }
-    indexBuffer.unmap();
-  }
-
-  // Create the depth texture for rendering/sampling the shadow map.
-  const shadowDepthTexture = device.createTexture({
-    size: [shadowDepthTextureSize, shadowDepthTextureSize, 1],
-    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
-    format: 'depth32float',
-  });
-  const shadowDepthTextureView = shadowDepthTexture.createView();
-
-  // Create some common descriptors used for both the shadow pipeline
-  // and the color rendering pipeline.
-  const vertexBuffers: Iterable = [
-    {
-      arrayStride: Float32Array.BYTES_PER_ELEMENT * 6,
-      attributes: [
-        {
-          // position
-          shaderLocation: 0,
-          offset: 0,
-          format: 'float32x3',
-        },
-        {
-          // normal
-          shaderLocation: 1,
-          offset: Float32Array.BYTES_PER_ELEMENT * 3,
-          format: 'float32x3',
-        },
-      ],
-    },
-  ];
-
-  const primitive: GPUPrimitiveState = {
-    topology: 'triangle-list',
-    cullMode: 'back',
-  };
-
-  const uniformBufferBindGroupLayout = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-    ],
-  });
-
-  const shadowPipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [
-        uniformBufferBindGroupLayout,
-        uniformBufferBindGroupLayout,
-      ],
-    }),
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexShadowWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: vertexBuffers,
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth32float',
-    },
-    primitive,
-  });
-
-  // Create a bind group layout which holds the scene uniforms and
-  // the texture+sampler for depth. We create it manually because the WebPU
-  // implementation doesn't infer this from the shader (yet).
-  const bglForRender = device.createBindGroupLayout({
-    entries: [
-      {
-        binding: 0,
-        visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-        buffer: {
-          type: 'uniform',
-        },
-      },
-      {
-        binding: 1,
-        visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-        texture: {
-          sampleType: 'depth',
-        },
-      },
-      {
-        binding: 2,
-        visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
-        sampler: {
-          type: 'comparison',
-        },
-      },
-    ],
-  });
-
-  const pipeline = device.createRenderPipeline({
-    layout: device.createPipelineLayout({
-      bindGroupLayouts: [bglForRender, uniformBufferBindGroupLayout],
-    }),
-    vertex: {
-      module: device.createShaderModule({
-        code: vertexWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: vertexBuffers,
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: fragmentWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-      constants: {
-        shadowDepthTextureSize,
-      },
-    },
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus-stencil8',
-    },
-    primitive,
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus-stencil8',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        // view is acquired and set in render loop.
-        view: undefined,
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-      stencilClearValue: 0,
-      stencilLoadOp: 'clear',
-      stencilStoreOp: 'store',
-    },
-  };
-
-  const modelUniformBuffer = device.createBuffer({
-    size: 4 * 16, // 4x4 matrix
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const sceneUniformBuffer = device.createBuffer({
-    // Two 4x4 viewProj matrices,
-    // one for the camera and one for the light.
-    // Then a vec3 for the light position.
-    // Rounded to the nearest multiple of 16.
-    size: 2 * 4 * 16 + 4 * 4,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const sceneBindGroupForShadow = device.createBindGroup({
-    layout: uniformBufferBindGroupLayout,
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: sceneUniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const sceneBindGroupForRender = device.createBindGroup({
-    layout: bglForRender,
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: sceneUniformBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: shadowDepthTextureView,
-      },
-      {
-        binding: 2,
-        resource: device.createSampler({
-          compare: 'less',
-        }),
-      },
-    ],
-  });
-
-  const modelBindGroup = device.createBindGroup({
-    layout: uniformBufferBindGroupLayout,
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: modelUniformBuffer,
-        },
-      },
-    ],
-  });
-
-  const eyePosition = vec3.fromValues(0, 50, -100);
-  const upVector = vec3.fromValues(0, 1, 0);
-  const origin = vec3.fromValues(0, 0, 0);
-
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    2000.0
-  );
-
-  const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
-
-  const lightPosition = vec3.fromValues(50, 100, -100);
-  const lightViewMatrix = mat4.lookAt(lightPosition, origin, upVector);
-  const lightProjectionMatrix = mat4.create();
-  {
-    const left = -80;
-    const right = 80;
-    const bottom = -80;
-    const top = 80;
-    const near = -200;
-    const far = 300;
-    mat4.ortho(left, right, bottom, top, near, far, lightProjectionMatrix);
-  }
-
-  const lightViewProjMatrix = mat4.multiply(
-    lightProjectionMatrix,
-    lightViewMatrix
-  );
-
-  const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);
-
-  // Move the model so it's centered.
-  const modelMatrix = mat4.translation([0, -45, 0]);
-
-  // The camera/light aren't moving, so write them into buffers now.
-  {
-    const lightMatrixData = lightViewProjMatrix as Float32Array;
-    device.queue.writeBuffer(
-      sceneUniformBuffer,
-      0,
-      lightMatrixData.buffer,
-      lightMatrixData.byteOffset,
-      lightMatrixData.byteLength
-    );
-
-    const cameraMatrixData = viewProjMatrix as Float32Array;
-    device.queue.writeBuffer(
-      sceneUniformBuffer,
-      64,
-      cameraMatrixData.buffer,
-      cameraMatrixData.byteOffset,
-      cameraMatrixData.byteLength
-    );
-
-    const lightData = lightPosition as Float32Array;
-    device.queue.writeBuffer(
-      sceneUniformBuffer,
-      128,
-      lightData.buffer,
-      lightData.byteOffset,
-      lightData.byteLength
-    );
-
-    const modelData = modelMatrix as Float32Array;
-    device.queue.writeBuffer(
-      modelUniformBuffer,
-      0,
-      modelData.buffer,
-      modelData.byteOffset,
-      modelData.byteLength
-    );
-  }
-
-  // Rotates the camera around the origin based on time.
-  function getCameraViewProjMatrix() {
-    const eyePosition = vec3.fromValues(0, 50, -100);
-
-    const rad = Math.PI * (Date.now() / 2000);
-    const rotation = mat4.rotateY(mat4.translation(origin), rad);
-    vec3.transformMat4(eyePosition, rotation, eyePosition);
-
-    const viewMatrix = mat4.lookAt(eyePosition, origin, upVector);
-
-    mat4.multiply(projectionMatrix, viewMatrix, viewProjMatrix);
-    return viewProjMatrix as Float32Array;
-  }
-
-  const shadowPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [],
-    depthStencilAttachment: {
-      view: shadowDepthTextureView,
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const cameraViewProj = getCameraViewProjMatrix();
-    device.queue.writeBuffer(
-      sceneUniformBuffer,
-      64,
-      cameraViewProj.buffer,
-      cameraViewProj.byteOffset,
-      cameraViewProj.byteLength
-    );
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    {
-      const shadowPass = commandEncoder.beginRenderPass(shadowPassDescriptor);
-      shadowPass.setPipeline(shadowPipeline);
-      shadowPass.setBindGroup(0, sceneBindGroupForShadow);
-      shadowPass.setBindGroup(1, modelBindGroup);
-      shadowPass.setVertexBuffer(0, vertexBuffer);
-      shadowPass.setIndexBuffer(indexBuffer, 'uint16');
-      shadowPass.drawIndexed(indexCount);
-
-      shadowPass.end();
-    }
-    {
-      const renderPass = commandEncoder.beginRenderPass(renderPassDescriptor);
-      renderPass.setPipeline(pipeline);
-      renderPass.setBindGroup(0, sceneBindGroupForRender);
-      renderPass.setBindGroup(1, modelBindGroup);
-      renderPass.setVertexBuffer(0, vertexBuffer);
-      renderPass.setIndexBuffer(indexBuffer, 'uint16');
-      renderPass.drawIndexed(indexCount);
-
-      renderPass.end();
-    }
-    device.queue.submit([commandEncoder.finish()]);
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const ShadowMapping: () => JSX.Element = () =>
-  makeSample({
-    name: 'Shadow Mapping',
-    description:
-      'This example shows how to sample from a depth texture to render shadows.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './vertexShadow.wgsl',
-        contents: vertexShadowWGSL,
-        editable: true,
-      },
-      {
-        name: './vertex.wgsl',
-        contents: vertexWGSL,
-        editable: true,
-      },
-      {
-        name: './fragment.wgsl',
-        contents: fragmentWGSL,
-        editable: true,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default ShadowMapping;
diff --git a/src/sample/skinnedMesh/main.ts b/src/sample/skinnedMesh/main.ts
deleted file mode 100644
index 1bcb98e7..00000000
--- a/src/sample/skinnedMesh/main.ts
+++ /dev/null
@@ -1,605 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-import { convertGLBToJSONAndBinary, GLTFSkin } from './glbUtils';
-import gltfWGSL from './gltf.wgsl';
-import gridWGSL from './grid.wgsl';
-import { Mat4, mat4, Quat, vec3 } from 'wgpu-matrix';
-import { createBindGroupCluster } from '../bitonicSort/utils';
-import {
-  createSkinnedGridBuffers,
-  createSkinnedGridRenderPipeline,
-} from './gridUtils';
-import { gridIndices } from './gridData';
-
-const MAT4X4_BYTES = 64;
-
-interface BoneObject {
-  transforms: Mat4[];
-  bindPoses: Mat4[];
-  bindPosesInv: Mat4[];
-}
-
-enum RenderMode {
-  NORMAL,
-  JOINTS,
-  WEIGHTS,
-}
-
-enum SkinMode {
-  ON,
-  OFF,
-}
-
-// Copied from toji/gl-matrix
-const getRotation = (mat: Mat4): Quat => {
-  // Initialize our output quaternion
-  const out = [0, 0, 0, 0];
-  // Extract the scaling factor from the final matrix transformation
-  // to normalize our rotation;
-  const scaling = mat4.getScaling(mat);
-  const is1 = 1 / scaling[0];
-  const is2 = 1 / scaling[1];
-  const is3 = 1 / scaling[2];
-
-  // Scale the matrix elements by the scaling factors
-  const sm11 = mat[0] * is1;
-  const sm12 = mat[1] * is2;
-  const sm13 = mat[2] * is3;
-  const sm21 = mat[4] * is1;
-  const sm22 = mat[5] * is2;
-  const sm23 = mat[6] * is3;
-  const sm31 = mat[8] * is1;
-  const sm32 = mat[9] * is2;
-  const sm33 = mat[10] * is3;
-
-  // The trace of a square matrix is the sum of its diagonal entries
-  // While the matrix trace has many interesting mathematical properties,
-  // the primary purpose of the trace is to assess the characteristics of the rotation.
-  const trace = sm11 + sm22 + sm33;
-  let S = 0;
-
-  // If all matrix elements contribute equally to the rotation.
-  if (trace > 0) {
-    S = Math.sqrt(trace + 1.0) * 2;
-    out[3] = 0.25 * S;
-    out[0] = (sm23 - sm32) / S;
-    out[1] = (sm31 - sm13) / S;
-    out[2] = (sm12 - sm21) / S;
-    // If the rotation is primarily around the x-axis
-  } else if (sm11 > sm22 && sm11 > sm33) {
-    S = Math.sqrt(1.0 + sm11 - sm22 - sm33) * 2;
-    out[3] = (sm23 - sm32) / S;
-    out[0] = 0.25 * S;
-    out[1] = (sm12 + sm21) / S;
-    out[2] = (sm31 + sm13) / S;
-    // If rotation is primarily around the y-axis
-  } else if (sm22 > sm33) {
-    S = Math.sqrt(1.0 + sm22 - sm11 - sm33) * 2;
-    out[3] = (sm31 - sm13) / S;
-    out[0] = (sm12 + sm21) / S;
-    out[1] = 0.25 * S;
-    out[2] = (sm23 + sm32) / S;
-    // If the rotation is primarily around the z-axis
-  } else {
-    S = Math.sqrt(1.0 + sm33 - sm11 - sm22) * 2;
-    out[3] = (sm12 - sm21) / S;
-    out[0] = (sm31 + sm13) / S;
-    out[1] = (sm23 + sm32) / S;
-    out[2] = 0.25 * S;
-  }
-
-  return out;
-};
-
-const init: SampleInit = async ({ canvas, pageState, gui }) => {
-  //Normal setup
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio || 1;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  const settings = {
-    cameraX: 0,
-    cameraY: -5.1,
-    cameraZ: -14.6,
-    objectScale: 1,
-    angle: 0.2,
-    speed: 50,
-    object: 'Whale',
-    renderMode: 'NORMAL',
-    skinMode: 'ON',
-  };
-
-  // Determine whether we want to render our whale or our skinned grid
-  gui.add(settings, 'object', ['Whale', 'Skinned Grid']).onChange(() => {
-    if (settings.object === 'Skinned Grid') {
-      settings.cameraX = -10;
-      settings.cameraY = 0;
-      settings.objectScale = 1.27;
-    } else {
-      if (settings.skinMode === 'OFF') {
-        settings.cameraX = 0;
-        settings.cameraY = 0;
-        settings.cameraZ = -11;
-      } else {
-        settings.cameraX = 0;
-        settings.cameraY = -5.1;
-        settings.cameraZ = -14.6;
-      }
-    }
-  });
-
-  // Output the mesh normals, its joints, or the weights that influence the movement of the joints
-  gui
-    .add(settings, 'renderMode', ['NORMAL', 'JOINTS', 'WEIGHTS'])
-    .onChange(() => {
-      device.queue.writeBuffer(
-        generalUniformsBuffer,
-        0,
-        new Uint32Array([RenderMode[settings.renderMode]])
-      );
-    });
-  // Determine whether the mesh is static or whether skinning is activated
-  gui.add(settings, 'skinMode', ['ON', 'OFF']).onChange(() => {
-    if (settings.object === 'Whale') {
-      if (settings.skinMode === 'OFF') {
-        settings.cameraX = 0;
-        settings.cameraY = 0;
-        settings.cameraZ = -11;
-      } else {
-        settings.cameraX = 0;
-        settings.cameraY = -5.1;
-        settings.cameraZ = -14.6;
-      }
-    }
-    device.queue.writeBuffer(
-      generalUniformsBuffer,
-      4,
-      new Uint32Array([SkinMode[settings.skinMode]])
-    );
-  });
-  const animFolder = gui.addFolder('Animation Settings');
-  animFolder.add(settings, 'angle', 0.05, 0.5).step(0.05);
-  animFolder.add(settings, 'speed', 10, 100).step(10);
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const cameraBuffer = device.createBuffer({
-    size: MAT4X4_BYTES * 3,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const cameraBGCluster = createBindGroupCluster(
-    [0],
-    [GPUShaderStage.VERTEX],
-    ['buffer'],
-    [{ type: 'uniform' }],
-    [[{ buffer: cameraBuffer }]],
-    'Camera',
-    device
-  );
-
-  const generalUniformsBuffer = device.createBuffer({
-    size: Uint32Array.BYTES_PER_ELEMENT * 2,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const generalUniformsBGCLuster = createBindGroupCluster(
-    [0],
-    [GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT],
-    ['buffer'],
-    [{ type: 'uniform' }],
-    [[{ buffer: generalUniformsBuffer }]],
-    'General',
-    device
-  );
-
-  // Same bindGroupLayout as in main file.
-  const nodeUniformsBindGroupLayout = device.createBindGroupLayout({
-    label: 'NodeUniforms.bindGroupLayout',
-    entries: [
-      {
-        binding: 0,
-        buffer: {
-          type: 'uniform',
-        },
-        visibility: GPUShaderStage.VERTEX,
-      },
-    ],
-  });
-
-  // Fetch whale resources from the glb file
-  const whaleScene = await fetch('../assets/gltf/whale.glb')
-    .then((res) => res.arrayBuffer())
-    .then((buffer) => convertGLBToJSONAndBinary(buffer, device));
-
-  // Builds a render pipeline for our whale mesh
-  // Since we are building a lightweight gltf parser around a gltf scene with a known
-  // quantity of meshes, we only build a renderPipeline for the singular mesh present
-  // within our scene. A more robust gltf parser would loop through all the meshes,
-  // cache replicated pipelines, and perform other optimizations.
-  whaleScene.meshes[0].buildRenderPipeline(
-    device,
-    gltfWGSL,
-    gltfWGSL,
-    presentationFormat,
-    depthTexture.format,
-    [
-      cameraBGCluster.bindGroupLayout,
-      generalUniformsBGCLuster.bindGroupLayout,
-      nodeUniformsBindGroupLayout,
-      GLTFSkin.skinBindGroupLayout,
-    ]
-  );
-
-  // Create skinned grid resources
-  const skinnedGridVertexBuffers = createSkinnedGridBuffers(device);
-  // Buffer for our uniforms, joints, and inverse bind matrices
-  const skinnedGridUniformBufferUsage: GPUBufferDescriptor = {
-    // 5 4x4 matrices, one for each bone
-    size: MAT4X4_BYTES * 5,
-    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
-  };
-  const skinnedGridJointUniformBuffer = device.createBuffer(
-    skinnedGridUniformBufferUsage
-  );
-  const skinnedGridInverseBindUniformBuffer = device.createBuffer(
-    skinnedGridUniformBufferUsage
-  );
-  const skinnedGridBoneBGCluster = createBindGroupCluster(
-    [0, 1],
-    [GPUShaderStage.VERTEX, GPUShaderStage.VERTEX],
-    ['buffer', 'buffer'],
-    [{ type: 'read-only-storage' }, { type: 'read-only-storage' }],
-    [
-      [
-        { buffer: skinnedGridJointUniformBuffer },
-        { buffer: skinnedGridInverseBindUniformBuffer },
-      ],
-    ],
-    'SkinnedGridJointUniforms',
-    device
-  );
-  const skinnedGridPipeline = createSkinnedGridRenderPipeline(
-    device,
-    presentationFormat,
-    gridWGSL,
-    gridWGSL,
-    [
-      cameraBGCluster.bindGroupLayout,
-      generalUniformsBGCLuster.bindGroupLayout,
-      skinnedGridBoneBGCluster.bindGroupLayout,
-    ]
-  );
-
-  // Global Calc
-  const aspect = canvas.width / canvas.height;
-  const perspectiveProjection = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    0.1,
-    100.0
-  );
-
-  const orthographicProjection = mat4.ortho(-20, 20, -10, 10, -100, 100);
-
-  function getProjectionMatrix() {
-    if (settings.object !== 'Skinned Grid') {
-      return perspectiveProjection as Float32Array;
-    }
-    return orthographicProjection as Float32Array;
-  }
-
-  function getViewMatrix() {
-    const viewMatrix = mat4.identity();
-    if (settings.object === 'Skinned Grid') {
-      mat4.translate(
-        viewMatrix,
-        vec3.fromValues(
-          settings.cameraX * settings.objectScale,
-          settings.cameraY * settings.objectScale,
-          settings.cameraZ
-        ),
-        viewMatrix
-      );
-    } else {
-      mat4.translate(
-        viewMatrix,
-        vec3.fromValues(settings.cameraX, settings.cameraY, settings.cameraZ),
-        viewMatrix
-      );
-    }
-    return viewMatrix as Float32Array;
-  }
-
-  function getModelMatrix() {
-    const modelMatrix = mat4.identity();
-    const scaleVector = vec3.fromValues(
-      settings.objectScale,
-      settings.objectScale,
-      settings.objectScale
-    );
-    mat4.scale(modelMatrix, scaleVector, modelMatrix);
-    if (settings.object === 'Whale') {
-      mat4.rotateY(modelMatrix, (Date.now() / 1000) * 0.5, modelMatrix);
-    }
-    return modelMatrix as Float32Array;
-  }
-
-  // Pass Descriptor for GLTFs
-  const gltfRenderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.3, g: 0.3, b: 0.3, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-      depthLoadOp: 'clear',
-      depthClearValue: 1.0,
-      depthStoreOp: 'store',
-    },
-  };
-
-  // Pass descriptor for grid with no depth testing
-  const skinnedGridRenderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.3, g: 0.3, b: 0.3, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-  };
-
-  const animSkinnedGrid = (boneTransforms: Mat4[], angle: number) => {
-    const m = mat4.identity();
-    mat4.rotateZ(m, angle, boneTransforms[0]);
-    mat4.translate(boneTransforms[0], vec3.create(4, 0, 0), m);
-    mat4.rotateZ(m, angle, boneTransforms[1]);
-    mat4.translate(boneTransforms[1], vec3.create(4, 0, 0), m);
-    mat4.rotateZ(m, angle, boneTransforms[2]);
-  };
-
-  // Create a group of bones
-  // Each index associates an actual bone to its transforms, bindPoses, uniforms, etc
-  const createBoneCollection = (numBones: number): BoneObject => {
-    // Initial bone transformation
-    const transforms: Mat4[] = [];
-    // Bone bind poses, an extra matrix per joint/bone that represents the starting point
-    // of the bone before any transformations are applied
-    const bindPoses: Mat4[] = [];
-    // Create a transform, bind pose, and inverse bind pose for each bone
-    for (let i = 0; i < numBones; i++) {
-      transforms.push(mat4.identity());
-      bindPoses.push(mat4.identity());
-    }
-
-    // Get initial bind pose positions
-    animSkinnedGrid(bindPoses, 0);
-    const bindPosesInv = bindPoses.map((bindPose) => {
-      return mat4.inverse(bindPose);
-    });
-
-    return {
-      transforms,
-      bindPoses,
-      bindPosesInv,
-    };
-  };
-
-  // Create bones of the skinned grid and write the inverse bind positions to
-  // the skinned grid's inverse bind matrix array
-  const gridBoneCollection = createBoneCollection(5);
-  for (let i = 0; i < gridBoneCollection.bindPosesInv.length; i++) {
-    device.queue.writeBuffer(
-      skinnedGridInverseBindUniformBuffer,
-      i * 64,
-      gridBoneCollection.bindPosesInv[i] as Float32Array
-    );
-  }
-
-  // A map that maps a joint index to the original matrix transformation of a bone
-  const origMatrices = new Map();
-  const animWhaleSkin = (skin: GLTFSkin, angle: number) => {
-    for (let i = 0; i < skin.joints.length; i++) {
-      // Index into the current joint
-      const joint = skin.joints[i];
-      // If our map does
-      if (!origMatrices.has(joint)) {
-        origMatrices.set(joint, whaleScene.nodes[joint].source.getMatrix());
-      }
-      // Get the original position, rotation, and scale of the current joint
-      const origMatrix = origMatrices.get(joint);
-      let m = mat4.create();
-      // Depending on which bone we are accessing, apply a specific rotation to the bone's original
-      // transformation to animate it
-      if (joint === 1 || joint === 0) {
-        m = mat4.rotateY(origMatrix, -angle);
-      } else if (joint === 3 || joint === 4) {
-        m = mat4.rotateX(origMatrix, joint === 3 ? angle : -angle);
-      } else {
-        m = mat4.rotateZ(origMatrix, angle);
-      }
-      // Apply the current transformation to the transform values within the relevant nodes
-      // (these nodes, of course, each being nodes that represent joints/bones)
-      whaleScene.nodes[joint].source.position = mat4.getTranslation(m);
-      whaleScene.nodes[joint].source.scale = mat4.getScaling(m);
-      whaleScene.nodes[joint].source.rotation = getRotation(m);
-    }
-  };
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    // Calculate camera matrices
-    const projectionMatrix = getProjectionMatrix();
-    const viewMatrix = getViewMatrix();
-    const modelMatrix = getModelMatrix();
-
-    // Calculate bone transformation
-    const t = (Date.now() / 20000) * settings.speed;
-    const angle = Math.sin(t) * settings.angle;
-    // Compute Transforms when angle is applied
-    animSkinnedGrid(gridBoneCollection.transforms, angle);
-
-    // Write to mvp to camera buffer
-    device.queue.writeBuffer(
-      cameraBuffer,
-      0,
-      projectionMatrix.buffer,
-      projectionMatrix.byteOffset,
-      projectionMatrix.byteLength
-    );
-
-    device.queue.writeBuffer(
-      cameraBuffer,
-      64,
-      viewMatrix.buffer,
-      viewMatrix.byteOffset,
-      viewMatrix.byteLength
-    );
-
-    device.queue.writeBuffer(
-      cameraBuffer,
-      128,
-      modelMatrix.buffer,
-      modelMatrix.byteOffset,
-      modelMatrix.byteLength
-    );
-
-    // Write to skinned grid bone uniform buffer
-    for (let i = 0; i < gridBoneCollection.transforms.length; i++) {
-      device.queue.writeBuffer(
-        skinnedGridJointUniformBuffer,
-        i * 64,
-        gridBoneCollection.transforms[i] as Float32Array
-      );
-    }
-
-    // Difference between these two render passes is just the presence of depthTexture
-    gltfRenderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    skinnedGridRenderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    // Update node matrixes
-    for (const scene of whaleScene.scenes) {
-      scene.root.updateWorldMatrix(device);
-    }
-
-    // Updates skins (we index into skins in the renderer, which is not the best approach but hey)
-    animWhaleSkin(whaleScene.skins[0], Math.sin(t) * settings.angle);
-    // Node 6 should be the only node with a drawable mesh so hopefully this works fine
-    whaleScene.skins[0].update(device, 6, whaleScene.nodes);
-
-    const commandEncoder = device.createCommandEncoder();
-    if (settings.object === 'Whale') {
-      const passEncoder = commandEncoder.beginRenderPass(
-        gltfRenderPassDescriptor
-      );
-      for (const scene of whaleScene.scenes) {
-        scene.root.renderDrawables(passEncoder, [
-          cameraBGCluster.bindGroups[0],
-          generalUniformsBGCLuster.bindGroups[0],
-        ]);
-      }
-      passEncoder.end();
-    } else {
-      // Our skinned grid isn't checking for depth, so we pass it
-      // a separate render descriptor that does not take in a depth texture
-      const passEncoder = commandEncoder.beginRenderPass(
-        skinnedGridRenderPassDescriptor
-      );
-      passEncoder.setPipeline(skinnedGridPipeline);
-      passEncoder.setBindGroup(0, cameraBGCluster.bindGroups[0]);
-      passEncoder.setBindGroup(1, generalUniformsBGCLuster.bindGroups[0]);
-      passEncoder.setBindGroup(2, skinnedGridBoneBGCluster.bindGroups[0]);
-      // Pass in vertex and index buffers generated from our static skinned grid
-      // data at ./gridData.ts
-      passEncoder.setVertexBuffer(0, skinnedGridVertexBuffers.positions);
-      passEncoder.setVertexBuffer(1, skinnedGridVertexBuffers.joints);
-      passEncoder.setVertexBuffer(2, skinnedGridVertexBuffers.weights);
-      passEncoder.setIndexBuffer(skinnedGridVertexBuffers.indices, 'uint16');
-      passEncoder.drawIndexed(gridIndices.length, 1);
-      passEncoder.end();
-    }
-
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const skinnedMesh: () => JSX.Element = () =>
-  makeSample({
-    name: 'Skinned Mesh',
-    description:
-      'A demonstration of basic gltf loading and mesh skinning, ported from https://webgl2fundamentals.org/webgl/lessons/webgl-skinning.html. Mesh data, per vertex attributes, and skin inverseBindMatrices are taken from the json parsed from the binary output of the .glb file. Animations are generated progrmatically, with animated joint matrices updated and passed to shaders per frame via uniform buffers.',
-    init,
-    gui: true,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './gridData.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./gridData.ts').default,
-      },
-      {
-        name: './gridUtils.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./gridUtils.ts').default,
-      },
-      {
-        name: './grid.wgsl',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./grid.wgsl').default,
-      },
-      {
-        name: './gltf.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./gltf.ts').default,
-      },
-      {
-        name: './glbUtils.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./glbUtils.ts').default,
-      },
-      {
-        name: './gltf.wgsl',
-        contents: gltfWGSL,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default skinnedMesh;
diff --git a/src/sample/texturedCube/main.ts b/src/sample/texturedCube/main.ts
deleted file mode 100644
index 60c213f8..00000000
--- a/src/sample/texturedCube/main.ts
+++ /dev/null
@@ -1,260 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import basicVertWGSL from '../../shaders/basic.vert.wgsl';
-import sampleTextureMixColorWGSL from './sampleTextureMixColor.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: basicVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: sampleTextureMixColorWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the cube is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const uniformBufferSize = 4 * 16; // 4x4 matrix
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  // Fetch the image and upload it into a GPUTexture.
-  let cubeTexture: GPUTexture;
-  {
-    const response = await fetch('../assets/img/Di-3d.png');
-    const imageBitmap = await createImageBitmap(await response.blob());
-
-    cubeTexture = device.createTexture({
-      size: [imageBitmap.width, imageBitmap.height, 1],
-      format: 'rgba8unorm',
-      usage:
-        GPUTextureUsage.TEXTURE_BINDING |
-        GPUTextureUsage.COPY_DST |
-        GPUTextureUsage.RENDER_ATTACHMENT,
-    });
-    device.queue.copyExternalImageToTexture(
-      { source: imageBitmap },
-      { texture: cubeTexture },
-      [imageBitmap.width, imageBitmap.height]
-    );
-  }
-
-  // Create a sampler with linear filtering for smooth interpolation.
-  const sampler = device.createSampler({
-    magFilter: 'linear',
-    minFilter: 'linear',
-  });
-
-  const uniformBindGroup = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-        },
-      },
-      {
-        binding: 1,
-        resource: sampler,
-      },
-      {
-        binding: 2,
-        resource: cubeTexture.createView(),
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-  const modelViewProjectionMatrix = mat4.create();
-
-  function getTransformationMatrix() {
-    const viewMatrix = mat4.identity();
-    mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);
-    const now = Date.now() / 1000;
-    mat4.rotate(
-      viewMatrix,
-      vec3.fromValues(Math.sin(now), Math.cos(now), 0),
-      1,
-      viewMatrix
-    );
-
-    mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);
-
-    return modelViewProjectionMatrix as Float32Array;
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    const transformationMatrix = getTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      transformationMatrix.buffer,
-      transformationMatrix.byteOffset,
-      transformationMatrix.byteLength
-    );
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setBindGroup(0, uniformBindGroup);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-    passEncoder.draw(cubeVertexCount);
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const TexturedCube: () => JSX.Element = () =>
-  makeSample({
-    name: 'Textured Cube',
-    description: 'This example shows how to bind and sample textures.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        contents: basicVertWGSL,
-        editable: true,
-      },
-      {
-        name: './sampleTextureMixColor.frag.wgsl',
-        contents: sampleTextureMixColorWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default TexturedCube;
diff --git a/src/sample/twoCubes/main.ts b/src/sample/twoCubes/main.ts
deleted file mode 100644
index 62064a1f..00000000
--- a/src/sample/twoCubes/main.ts
+++ /dev/null
@@ -1,285 +0,0 @@
-import { mat4, vec3 } from 'wgpu-matrix';
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-import {
-  cubeVertexArray,
-  cubeVertexSize,
-  cubeUVOffset,
-  cubePositionOffset,
-  cubeVertexCount,
-} from '../../meshes/cube';
-
-import basicVertWGSL from '../../shaders/basic.vert.wgsl';
-import vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  const adapter = await navigator.gpu.requestAdapter();
-  const device = await adapter.requestDevice();
-
-  if (!pageState.active) return;
-  const context = canvas.getContext('webgpu') as GPUCanvasContext;
-
-  const devicePixelRatio = window.devicePixelRatio;
-  canvas.width = canvas.clientWidth * devicePixelRatio;
-  canvas.height = canvas.clientHeight * devicePixelRatio;
-  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
-
-  context.configure({
-    device,
-    format: presentationFormat,
-    alphaMode: 'premultiplied',
-  });
-
-  // Create a vertex buffer from the cube data.
-  const verticesBuffer = device.createBuffer({
-    size: cubeVertexArray.byteLength,
-    usage: GPUBufferUsage.VERTEX,
-    mappedAtCreation: true,
-  });
-  new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
-  verticesBuffer.unmap();
-
-  const pipeline = device.createRenderPipeline({
-    layout: 'auto',
-    vertex: {
-      module: device.createShaderModule({
-        code: basicVertWGSL,
-      }),
-      entryPoint: 'main',
-      buffers: [
-        {
-          arrayStride: cubeVertexSize,
-          attributes: [
-            {
-              // position
-              shaderLocation: 0,
-              offset: cubePositionOffset,
-              format: 'float32x4',
-            },
-            {
-              // uv
-              shaderLocation: 1,
-              offset: cubeUVOffset,
-              format: 'float32x2',
-            },
-          ],
-        },
-      ],
-    },
-    fragment: {
-      module: device.createShaderModule({
-        code: vertexPositionColorWGSL,
-      }),
-      entryPoint: 'main',
-      targets: [
-        {
-          format: presentationFormat,
-        },
-      ],
-    },
-    primitive: {
-      topology: 'triangle-list',
-
-      // Backface culling since the cube is solid piece of geometry.
-      // Faces pointing away from the camera will be occluded by faces
-      // pointing toward the camera.
-      cullMode: 'back',
-    },
-
-    // Enable depth testing so that the fragment closest to the camera
-    // is rendered in front.
-    depthStencil: {
-      depthWriteEnabled: true,
-      depthCompare: 'less',
-      format: 'depth24plus',
-    },
-  });
-
-  const depthTexture = device.createTexture({
-    size: [canvas.width, canvas.height],
-    format: 'depth24plus',
-    usage: GPUTextureUsage.RENDER_ATTACHMENT,
-  });
-
-  const matrixSize = 4 * 16; // 4x4 matrix
-  const offset = 256; // uniformBindGroup offset must be 256-byte aligned
-  const uniformBufferSize = offset + matrixSize;
-
-  const uniformBuffer = device.createBuffer({
-    size: uniformBufferSize,
-    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
-  });
-
-  const uniformBindGroup1 = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-          offset: 0,
-          size: matrixSize,
-        },
-      },
-    ],
-  });
-
-  const uniformBindGroup2 = device.createBindGroup({
-    layout: pipeline.getBindGroupLayout(0),
-    entries: [
-      {
-        binding: 0,
-        resource: {
-          buffer: uniformBuffer,
-          offset: offset,
-          size: matrixSize,
-        },
-      },
-    ],
-  });
-
-  const renderPassDescriptor: GPURenderPassDescriptor = {
-    colorAttachments: [
-      {
-        view: undefined, // Assigned later
-
-        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
-        loadOp: 'clear',
-        storeOp: 'store',
-      },
-    ],
-    depthStencilAttachment: {
-      view: depthTexture.createView(),
-
-      depthClearValue: 1.0,
-      depthLoadOp: 'clear',
-      depthStoreOp: 'store',
-    },
-  };
-
-  const aspect = canvas.width / canvas.height;
-  const projectionMatrix = mat4.perspective(
-    (2 * Math.PI) / 5,
-    aspect,
-    1,
-    100.0
-  );
-
-  const modelMatrix1 = mat4.translation(vec3.create(-2, 0, 0));
-  const modelMatrix2 = mat4.translation(vec3.create(2, 0, 0));
-  const modelViewProjectionMatrix1 = mat4.create() as Float32Array;
-  const modelViewProjectionMatrix2 = mat4.create() as Float32Array;
-  const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -7));
-
-  const tmpMat41 = mat4.create();
-  const tmpMat42 = mat4.create();
-
-  function updateTransformationMatrix() {
-    const now = Date.now() / 1000;
-
-    mat4.rotate(
-      modelMatrix1,
-      vec3.fromValues(Math.sin(now), Math.cos(now), 0),
-      1,
-      tmpMat41
-    );
-    mat4.rotate(
-      modelMatrix2,
-      vec3.fromValues(Math.cos(now), Math.sin(now), 0),
-      1,
-      tmpMat42
-    );
-
-    mat4.multiply(viewMatrix, tmpMat41, modelViewProjectionMatrix1);
-    mat4.multiply(
-      projectionMatrix,
-      modelViewProjectionMatrix1,
-      modelViewProjectionMatrix1
-    );
-    mat4.multiply(viewMatrix, tmpMat42, modelViewProjectionMatrix2);
-    mat4.multiply(
-      projectionMatrix,
-      modelViewProjectionMatrix2,
-      modelViewProjectionMatrix2
-    );
-  }
-
-  function frame() {
-    // Sample is no longer the active page.
-    if (!pageState.active) return;
-
-    updateTransformationMatrix();
-    device.queue.writeBuffer(
-      uniformBuffer,
-      0,
-      modelViewProjectionMatrix1.buffer,
-      modelViewProjectionMatrix1.byteOffset,
-      modelViewProjectionMatrix1.byteLength
-    );
-    device.queue.writeBuffer(
-      uniformBuffer,
-      offset,
-      modelViewProjectionMatrix2.buffer,
-      modelViewProjectionMatrix2.byteOffset,
-      modelViewProjectionMatrix2.byteLength
-    );
-
-    renderPassDescriptor.colorAttachments[0].view = context
-      .getCurrentTexture()
-      .createView();
-
-    const commandEncoder = device.createCommandEncoder();
-    const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
-    passEncoder.setPipeline(pipeline);
-    passEncoder.setVertexBuffer(0, verticesBuffer);
-
-    // Bind the bind group (with the transformation matrix) for
-    // each cube, and draw.
-    passEncoder.setBindGroup(0, uniformBindGroup1);
-    passEncoder.draw(cubeVertexCount);
-
-    passEncoder.setBindGroup(0, uniformBindGroup2);
-    passEncoder.draw(cubeVertexCount);
-
-    passEncoder.end();
-    device.queue.submit([commandEncoder.finish()]);
-
-    requestAnimationFrame(frame);
-  }
-  requestAnimationFrame(frame);
-};
-
-const TwoCubes: () => JSX.Element = () =>
-  makeSample({
-    name: 'Two Cubes',
-    description:
-      'This example shows some of the alignment requirements \
-       involved when updating and binding multiple slices of a \
-       uniform buffer. It renders two rotating cubes which have transform \
-       matrices at different offsets in a uniform buffer.',
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        contents: basicVertWGSL,
-        editable: true,
-      },
-      {
-        name: '../../shaders/vertexPositionColor.frag.wgsl',
-        contents: vertexPositionColorWGSL,
-        editable: true,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default TwoCubes;
diff --git a/src/sample/worker/main.ts b/src/sample/worker/main.ts
deleted file mode 100644
index 30ce76c5..00000000
--- a/src/sample/worker/main.ts
+++ /dev/null
@@ -1,85 +0,0 @@
-import { makeSample, SampleInit } from '../../components/SampleLayout';
-
-const init: SampleInit = async ({ canvas, pageState }) => {
-  if (!pageState.active) return;
-
-  // The web worker is created by passing a path to the worker's source file, which will then be
-  // executed on a separate thread.
-  const worker = new Worker(new URL('./worker.ts', import.meta.url));
-
-  // The primary way to communicate with the worker is to send and receive messages.
-  worker.addEventListener('message', (ev) => {
-    // The format of the message can be whatever you'd like, but it's helpful to decide on a
-    // consistent convention so that you can tell the message types apart as your apps grow in
-    // complexity. Here we establish a convention that all messages to and from the worker will
-    // have a `type` field that we can use to determine the content of the message.
-    switch (ev.data.type) {
-      default: {
-        console.error(`Unknown Message Type: ${ev.data.type}`);
-      }
-    }
-  });
-
-  try {
-    // In order for the worker to display anything on the page, an OffscreenCanvas must be used.
-    // Here we can create one from our normal canvas by calling transferControlToOffscreen().
-    // Anything drawn to the OffscreenCanvas that call returns will automatically be displayed on
-    // the source canvas on the page.
-    const offscreenCanvas = canvas.transferControlToOffscreen();
-    const devicePixelRatio = window.devicePixelRatio;
-    offscreenCanvas.width = canvas.clientWidth * devicePixelRatio;
-    offscreenCanvas.height = canvas.clientHeight * devicePixelRatio;
-
-    // Send a message to the worker telling it to initialize WebGPU with the OffscreenCanvas. The
-    // array passed as the second argument here indicates that the OffscreenCanvas is to be
-    // transferred to the worker, meaning this main thread will lose access to it and it will be
-    // fully owned by the worker.
-    worker.postMessage({ type: 'init', offscreenCanvas }, [offscreenCanvas]);
-  } catch (err) {
-    // TODO: This catch is added here because React will call init twice with the same canvas, and
-    // the second time will fail the transferControlToOffscreen() because it's already been
-    // transferred. I'd love to know how to get around that.
-    console.warn(err.message);
-    worker.terminate();
-  }
-};
-
-const WebGPUWorker: () => JSX.Element = () =>
-  makeSample({
-    name: 'WebGPU in a Worker',
-    description: `This example shows one method of using WebGPU in a web worker and presenting to
-      the main thread. It uses canvas.transferControlToOffscreen() to produce an offscreen canvas
-      which is then transferred to the worker where all the WebGPU calls are made.`,
-    init,
-    sources: [
-      {
-        name: __filename.substring(__dirname.length + 1),
-        contents: __SOURCE__,
-      },
-      {
-        name: './worker.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!./worker.ts').default,
-      },
-      {
-        name: '../../shaders/basic.vert.wgsl',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../shaders/basic.vert.wgsl').default,
-      },
-      {
-        name: '../../shaders/vertexPositionColor.frag.wgsl',
-        contents:
-          // eslint-disable-next-line @typescript-eslint/no-var-requires
-          require('!!raw-loader!../../shaders/vertexPositionColor.frag.wgsl')
-            .default,
-      },
-      {
-        name: '../../meshes/cube.ts',
-        // eslint-disable-next-line @typescript-eslint/no-var-requires
-        contents: require('!!raw-loader!../../meshes/cube.ts').default,
-      },
-    ],
-    filename: __filename,
-  });
-
-export default WebGPUWorker;
diff --git a/src/samples.ts b/src/samples.ts
new file mode 100644
index 00000000..a19ba4c8
--- /dev/null
+++ b/src/samples.ts
@@ -0,0 +1,134 @@
+import aBuffer from '../sample/a-buffer/meta';
+import animometer from '../sample/animometer/meta';
+import bitonicSort from '../sample/bitonicSort/meta';
+import cameras from '../sample/cameras/meta';
+import cornell from '../sample/cornell/meta';
+import computeBoids from '../sample/computeBoids/meta';
+import cubemap from '../sample/cubemap/meta';
+import deferredRendering from '../sample/deferredRendering/meta';
+import fractalCube from '../sample/fractalCube/meta';
+import gameOfLife from '../sample/gameOfLife/meta';
+import helloTriangle from '../sample/helloTriangle/meta';
+import helloTriangleMSAA from '../sample/helloTriangleMSAA/meta';
+import imageBlur from '../sample/imageBlur/meta';
+import instancedCube from '../sample/instancedCube/meta';
+import normalMap from '../sample/normalMap/meta';
+import particles from '../sample/particles/meta';
+import renderBundles from '../sample/renderBundles/meta';
+import resizeCanvas from '../sample/resizeCanvas/meta';
+import reversedZ from '../sample/reversedZ/meta';
+import rotatingCube from '../sample/rotatingCube/meta';
+import samplerParameters from '../sample/samplerParameters/meta';
+import shadowMapping from '../sample/shadowMapping/meta';
+import skinnedMesh from '../sample/skinnedMesh/meta';
+import texturedCube from '../sample/texturedCube/meta';
+import twoCubes from '../sample/twoCubes/meta';
+import videoUploading from '../sample/videoUploading/meta';
+import worker from '../sample/worker/meta';
+
+export type SourceInfo = {
+  path: string;
+};
+
+export type SampleInfo = {
+  name: string;
+  tocName?: string;
+  description: string;
+  filename: string;
+  sources: SourceInfo[];
+};
+
+type PageCategory = {
+  title: string;
+  description: string;
+  samples: { [key: string]: SampleInfo };
+};
+
+export const pageCategories: PageCategory[] = [
+  // Samples that implement basic rendering functionality using the WebGPU API.
+  {
+    title: 'Basic Graphics',
+    description:
+      'Basic rendering functionality implemented with the WebGPU API.',
+    samples: {
+      helloTriangle,
+      helloTriangleMSAA,
+      rotatingCube,
+      twoCubes,
+      texturedCube,
+      instancedCube,
+      fractalCube,
+      cubemap,
+    },
+  },
+
+  // Samples that demonstrate functionality specific to WebGPU, or demonstrate the particularities
+  // of how WebGPU implements a particular feature within its api. For instance, while many of the
+  // sampler parameters in the 'samplerParameters' sample have direct analogues in other graphics api,
+  // the primary purpose of 'sampleParameters' is to demonstrate their specific nomenclature and
+  // functionality within the context of the WebGPU API.
+  {
+    title: 'WebGPU Features',
+    description: 'Highlights of important WebGPU features.',
+    samples: {
+      samplerParameters,
+      reversedZ,
+      renderBundles,
+    },
+  },
+
+  // Samples that demonstrate the GPGPU functionality of WebGPU. These samples generally provide some
+  // user-facing representation (e.g. image, text, or audio) of the result of compute operations.
+  // Any rendering code is primarily for visualization, not key to the unique part of the sample;
+  // rendering could also be done using canvas2D without detracting from the sample's usefulness.
+  {
+    title: 'GPGPU Demos',
+    description: 'Visualizations of parallel GPU compute operations.',
+    samples: {
+      computeBoids,
+      gameOfLife,
+      bitonicSort,
+    },
+  },
+
+  // A selection of samples demonstrating various graphics techniques, utilizing various features
+  // of the WebGPU API, and often executing render and compute pipelines in tandem to achieve their
+  // visual results. The techniques demonstrated may even be independent of WebGPU (e.g. 'cameras')
+  {
+    title: 'Graphics Techniques',
+    description: 'A collection of graphics techniques implemented with WebGPU.',
+    samples: {
+      cameras,
+      normalMap,
+      shadowMapping,
+      deferredRendering,
+      particles,
+      imageBlur,
+      cornell,
+      'a-buffer': aBuffer,
+      skinnedMesh,
+    },
+  },
+
+  // Samples that demonstrate how to integrate WebGPU and/or WebGPU render operations with other
+  // functionalities provided by the web platform.
+  {
+    title: 'Web Platform Integration',
+    description:
+      'Demos integrating WebGPU with other functionalities of the web platform.',
+    samples: {
+      resizeCanvas,
+      videoUploading,
+      worker,
+    },
+  },
+
+  // Samples whose primary purpose is to benchmark WebGPU performance.
+  {
+    title: 'Benchmarks',
+    description: 'WebGPU Performance Benchmarks',
+    samples: {
+      animometer,
+    },
+  },
+];
diff --git a/src/shaders/sampleTexture.frag.wgsl b/src/shaders/sampleTexture.frag.wgsl
deleted file mode 100644
index 10eedd23..00000000
--- a/src/shaders/sampleTexture.frag.wgsl
+++ /dev/null
@@ -1,10 +0,0 @@
-@group(0) @binding(1) var mySampler: sampler;
-@group(0) @binding(2) var myTexture: texture_2d<f32>;
-
-@fragment
-fn main(
-  @location(0) fragUV: vec2<f32>,
-  @location(1) fragPosition: vec4<f32>
-) -> @location(0) vec4<f32> {
-  return textureSample(myTexture, mySampler, fragUV);
-}
diff --git a/src/tsconfig.json b/src/tsconfig.json
new file mode 100644
index 00000000..5520b7fc
--- /dev/null
+++ b/src/tsconfig.json
@@ -0,0 +1,21 @@
+{
+  "extends": "@tsconfig/recommended/tsconfig.json",
+  "compilerOptions": {
+    "target": "ESNext",
+    "module": "ESNext",
+    "outDir": "../out",
+    "rootDir": "../",
+    "moduleResolution": "Node",
+    "typeRoots": [
+      "../node_modules/@webgpu/types",
+      "../node_modules/@types",
+    ],
+  },
+  "include": [
+    "../src/**/*.ts",
+    "../sample/*.ts",
+  ],
+  "exclude": [
+    "../out"
+  ],
+}
\ No newline at end of file
diff --git a/src/types.d.ts b/src/types.d.ts
index d3c73264..e983166f 100644
--- a/src/types.d.ts
+++ b/src/types.d.ts
@@ -1,31 +1,6 @@
 ///