diff --git a/public/assets/img/volume/.gitignore b/public/assets/img/volume/.gitignore
new file mode 100644
index 00000000..d5779a0b
--- /dev/null
+++ b/public/assets/img/volume/.gitignore
@@ -0,0 +1,4 @@
+slices/
+t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin
+t1_icbm_normal_1mm_pn0_rf0.npy
+t1_icbm_normal_1mm_pn0_rf0.rawb
diff --git a/public/assets/img/volume/README.adoc b/public/assets/img/volume/README.adoc
new file mode 100644
index 00000000..99d8a2be
--- /dev/null
+++ b/public/assets/img/volume/README.adoc
@@ -0,0 +1,73 @@
+:toc:
+
+== Data from BrainWeb: Simulated Brain Database
+
+In the "Volume Rendering - Texture 3D" sample, the implementation uses simulated brain data from BrainWeb. The render results, as seen in the sample, were validated to be representative of standard visualization with VTK.
+
+=== Reproduction
+
+1. Go to https://brainweb.bic.mni.mcgill.ca/brainweb/selection_normal.html
+2. Set modality to T1.
+3. Set slice thickness to 1mm.
+4. Set noise to 0%.
+5. Set RF to 0%.
+6. Click Download.
+7. Set file format to raw byte.
+8. Set compression to none.
+9. Follow any remaining instructions on the website as they apply to you.
+10. Click Start Download.
+11. Copy the downloaded `*.rawb` data to this directory.
+12. Activate a Python environment with at least Python 3.12, NumPy, SciPy, and Pillow.
+13. Start a terminal in this directory.
+14. Run the `t1_icbm_normal_1mm_pn0_rf0.py` script (see the sketch below).
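+
+A minimal sketch of steps 12-14 in a Unix-like shell (the virtual-environment name `.venv` and the exact commands are assumptions; adapt them to your platform):
+
+[source,sh]
+----
+# Create and activate a scratch environment (assumes python3 is version 3.12+).
+python3 -m venv .venv
+. .venv/bin/activate
+
+# Install the packages the script imports.
+pip install numpy scipy pillow
+
+# Run the processing script next to the downloaded .rawb file.
+python t1_icbm_normal_1mm_pn0_rf0.py
+----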
+
+=== References
+
+* http://www.bic.mni.mcgill.ca/brainweb/[`http://www.bic.mni.mcgill.ca/brainweb/`]
+
+* C.A. Cocosco, V. Kollokian, R.K.-S. Kwan, A.C. Evans:
+ __"BrainWeb: Online Interface to a 3D MRI Simulated Brain Database"__ +
+ NeuroImage, vol. 5, no. 4, part 2/4, S425, 1997 (Proceedings of the 3rd International Conference on Functional Mapping of the Human Brain, Copenhagen, May 1997).
+ ** abstract available in
+ http://www.bic.mni.mcgill.ca/users/crisco/HBM97_abs/HBM97_abs.html[HTML],
+ http://www.bic.mni.mcgill.ca/users/crisco/HBM97_abs/HBM97_abs.pdf[PDF (500 KB)],
+ or http://www.bic.mni.mcgill.ca/users/crisco/HBM97_abs/HBM97_abs.ps.gz[gzipped PostScript (500 KB)].
+ ** poster available in
+ http://www.bic.mni.mcgill.ca/users/crisco/HBM97_poster/HBM97_poster.pdf[PDF (1.1 MB)],
+ or http://www.bic.mni.mcgill.ca/users/crisco/HBM97_poster/HBM97_poster.ps.gz[gzipped PostScript (850 KB)].
+
+* R.K.-S. Kwan, A.C. Evans, G.B. Pike:
+ __"MRI simulation-based evaluation of image-processing and classification methods"__ +
+ IEEE Transactions on Medical Imaging, vol. 18, no. 11, p. 1085-1097, Nov 1999.
+
+* R.K.-S. Kwan, A.C. Evans, G.B. Pike:
+ __"An Extensible MRI Simulator for Post-Processing Evaluation"__ +
+ Visualization in Biomedical Computing (VBC'96). Lecture Notes in Computer Science, vol. 1131. Springer-Verlag, 1996. p. 135-140.
+ ** paper available in
+ http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/paper/vbc96.html[HTML],
+ http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/paper/vbc96.ps[PostScript (1 MB)],
+ or http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/paper/vbc96.ps.gz[gzipped PostScript (380 KB)].
+ ** poster available in
+ http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/poster/vbc96bw.ps[grey-scale PostScript (5.3 MB)],
+ http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/poster/vbc96bw.ps.gz[grey-scale gzipped PostScript (536 KB)],
+ or http://www.bic.mni.mcgill.ca/users/rkwan/vbc96/poster/vbc96.poster.ps.gz[colour gzipped PostScript (597 KB)].
+
+* D.L. Collins, A.P. Zijdenbos, V. Kollokian, J.G. Sled, N.J. Kabani, C.J. Holmes, A.C. Evans:
+ __"Design and Construction of a Realistic Digital Brain Phantom"__ +
+ IEEE Transactions on Medical Imaging, vol. 17, no. 3, p. 463-468, June 1998.
+ ** paper available in http://www.bic.mni.mcgill.ca/users/louis/papers/phantom/[HTML].
diff --git a/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0.py b/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0.py
new file mode 100644
index 00000000..126ffafa
--- /dev/null
+++ b/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0.py
@@ -0,0 +1,72 @@
+import gzip
+import os
+
+import numpy as np
+from PIL import Image
+from scipy.ndimage import zoom
+
+
+def resample_volume(byte_array, original_shape, new_shape):
+ # Interpret the raw bytes as a uint8 volume of the given shape.
+ volume = np.frombuffer(byte_array, dtype=np.uint8).reshape(original_shape)
+ zoom_factors = [n / o for n, o in zip(new_shape, original_shape)]
+ # order=4 selects 4th-order spline interpolation in scipy.ndimage.zoom.
+ resampled_volume = zoom(volume, zoom_factors, order=4)
+ return resampled_volume
+
+
+def load_byte_array_from_file(file_path):
+ with open(file_path, "rb") as file:
+ byte_array = file.read()
+ return byte_array
+
+
+file_path = "t1_icbm_normal_1mm_pn0_rf0.rawb"
+byte_array = load_byte_array_from_file(file_path)
+original_shape = (181, 217, 181)
+new_shape = (180, 216, 180)
+resampled_volume = resample_volume_lanczos(byte_array, original_shape, new_shape)
+
+np.save("t1_icbm_normal_1mm_pn0_rf0.npy", resampled_volume)
+
+
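+# Reload the saved volume and export each slice along the first axis as an
+# 8-bit grayscale PNG.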
+file_path = "t1_icbm_normal_1mm_pn0_rf0.npy"
+data = np.load(file_path)
+os.makedirs("slices", exist_ok=True)
+
+for i, slice_2d in enumerate(data):
+ img = Image.fromarray(slice_2d)
+ if img.mode != "L":
+ img = img.convert("L")
+ img.save(f"slices/slice_{i:03d}.png")
+
+print(f"Exported {len(data)} slices.")
+
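+# Concatenate the PNG slices, in sorted filename order, back into one raw
+# uint8 volume.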
+source_directory = "slices"
+final_file_path = "t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin"
+
+with open(final_file_path, "wb") as output_file:
+ for file_name in sorted(os.listdir(source_directory)):
+ if file_name.lower().endswith(".png"):
+ source_path = os.path.join(source_directory, file_name)
+ img = Image.open(source_path).convert("L")
+ img_data = np.array(img, dtype=np.uint8)
+ img_data.tofile(output_file)
+
+print("Images have been successfully converted and concatenated.")
+
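+# Gzip the raw volume; the sample fetches the .bin-gz file and inflates it in
+# the browser with a DecompressionStream.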
+with open("t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin", "rb") as f:
+ bytes_data = f.read()
+
+gzip_filename = "t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin-gz"
+
+with gzip.open(gzip_filename, "wb", compresslevel=9) as f:
+ f.write(bytes_data)
+
+print(f"File compressed and saved as {gzip_filename}")
diff --git a/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin-gz b/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin-gz
new file mode 100644
index 00000000..e2146c2a
Binary files /dev/null and b/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin-gz differ
diff --git a/sample/volumeRenderingTexture3D/index.html b/sample/volumeRenderingTexture3D/index.html
new file mode 100644
index 00000000..24fa23ea
--- /dev/null
+++ b/sample/volumeRenderingTexture3D/index.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no">
+    <title>webgpu-samples: volumeRenderingTexture3D</title>
+    <style>
+      :root {
+        color-scheme: dark;
+      }
+      html,
+      body {
+        margin: 0;      /* remove default margin */
+        height: 100%;   /* make body fill the browser window */
+        display: flex;
+        place-content: center center;
+      }
+      canvas {
+        width: 600px;
+        height: 600px;
+        max-width: 100%;
+        display: block;
+      }
+    </style>
+  </head>
+  <body>
+    <canvas></canvas>
+    <script type="module" src="./main.ts"></script>
+  </body>
+</html>
diff --git a/sample/volumeRenderingTexture3D/main.ts b/sample/volumeRenderingTexture3D/main.ts
new file mode 100644
index 00000000..4c6454d6
--- /dev/null
+++ b/sample/volumeRenderingTexture3D/main.ts
@@ -0,0 +1,220 @@
+import { mat4 } from 'wgpu-matrix';
+import { GUI } from 'dat.gui';
+import volumeWGSL from './volume.wgsl';
+
+const canvas = document.querySelector('canvas') as HTMLCanvasElement;
+
+const gui = new GUI();
+
+// GUI parameters
+const params: { rotateCamera: boolean; near: number; far: number } = {
+ rotateCamera: true,
+ near: 2.0,
+ far: 7.0,
+};
+
+gui.add(params, 'rotateCamera');
+gui.add(params, 'near', 2.0, 7.0);
+gui.add(params, 'far', 2.0, 7.0);
+
+const adapter = await navigator.gpu.requestAdapter();
+const device = await adapter.requestDevice();
+const context = canvas.getContext('webgpu') as GPUCanvasContext;
+
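+// Render into a 4x multisampled texture and resolve it to the canvas each frame.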
+const sampleCount = 4;
+
+const devicePixelRatio = window.devicePixelRatio;
+canvas.width = canvas.clientWidth * devicePixelRatio;
+canvas.height = canvas.clientHeight * devicePixelRatio;
+const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
+
+context.configure({
+ device,
+ format: presentationFormat,
+ alphaMode: 'premultiplied',
+});
+
+const pipeline = device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: device.createShaderModule({
+ code: volumeWGSL,
+ }),
+ },
+ fragment: {
+ module: device.createShaderModule({
+ code: volumeWGSL,
+ }),
+ targets: [
+ {
+ format: presentationFormat,
+ },
+ ],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ cullMode: 'back',
+ },
+ multisample: {
+ count: sampleCount,
+ },
+});
+
+const texture = device.createTexture({
+ size: [canvas.width, canvas.height],
+ sampleCount,
+ format: presentationFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+});
+const view = texture.createView();
+
+const uniformBufferSize = 4 * 16; // 4x4 matrix
+const uniformBuffer = device.createBuffer({
+ size: uniformBufferSize,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+});
+
+// Fetch the pre-processed volume data and upload it into a 3D GPUTexture.
+let volumeTexture: GPUTexture;
+{
+ const width = 180;
+ const height = 216;
+ const depth = 180;
+ const format: GPUTextureFormat = 'r8unorm';
+ const blockLength = 1;
+ const bytesPerBlock = 1;
+ const blocksWide = Math.ceil(width / blockLength);
+ const blocksHigh = Math.ceil(height / blockLength);
+ const bytesPerRow = blocksWide * bytesPerBlock;
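+ // r8unorm is uncompressed (one byte per texel), so the block size is 1 and
+ // bytesPerRow is simply the row width in bytes.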
+ const dataPath =
+ '../../assets/img/volume/t1_icbm_normal_1mm_pn0_rf0_180x216x180_uint8_1x1.bin-gz';
+
+ // Fetch the compressed data
+ const response = await fetch(dataPath);
+ const compressedArrayBuffer = await response.arrayBuffer();
+
+ // Decompress the data using DecompressionStream for gzip format
+ const decompressionStream = new DecompressionStream('gzip');
+ const decompressedStream = new Response(
+ compressedArrayBuffer
+ ).body.pipeThrough(decompressionStream);
+ const decompressedArrayBuffer = await new Response(
+ decompressedStream
+ ).arrayBuffer();
+ const byteArray = new Uint8Array(decompressedArrayBuffer);
+
+ volumeTexture = device.createTexture({
+ dimension: '3d',
+ size: [width, height, depth],
+ format: format,
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
+ });
+
+ device.queue.writeTexture(
+ {
+ texture: volumeTexture,
+ },
+ byteArray,
+ { bytesPerRow: bytesPerRow, rowsPerImage: blocksHigh },
+ [width, height, depth]
+ );
+}
+
+// Create a sampler with linear filtering for smooth interpolation.
+const sampler = device.createSampler({
+ magFilter: 'linear',
+ minFilter: 'linear',
+ mipmapFilter: 'linear',
+ maxAnisotropy: 16,
+});
+
+const uniformBindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ },
+ },
+ {
+ binding: 1,
+ resource: sampler,
+ },
+ {
+ binding: 2,
+ resource: volumeTexture.createView(),
+ },
+ ],
+});
+
+const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: undefined, // Assigned later
+
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'discard',
+ },
+ ],
+};
+
+let rotation = 0;
+
+function getInverseModelViewProjectionMatrix(deltaTime: number) {
+ const viewMatrix = mat4.identity();
+ mat4.translate(viewMatrix, [0, 0, -4], viewMatrix);
+ if (params.rotateCamera) {
+ rotation += deltaTime;
+ }
+ mat4.rotate(
+ viewMatrix,
+ [Math.sin(rotation), Math.cos(rotation), 0],
+ 1,
+ viewMatrix
+ );
+
+ const aspect = canvas.width / canvas.height;
+ const projectionMatrix = mat4.perspective(
+ (2 * Math.PI) / 5,
+ aspect,
+ params.near,
+ params.far
+ );
+ const modelViewProjectionMatrix = mat4.multiply(projectionMatrix, viewMatrix);
+
+ return mat4.invert(modelViewProjectionMatrix) as Float32Array;
+}
+
+let lastFrameMS = Date.now();
+
+function frame() {
+ const now = Date.now();
+ const deltaTime = (now - lastFrameMS) / 1000;
+ lastFrameMS = now;
+
+ const inverseModelViewProjection =
+ getInverseModelViewProjectionMatrix(deltaTime);
+ device.queue.writeBuffer(uniformBuffer, 0, inverseModelViewProjection);
+ renderPassDescriptor.colorAttachments[0].view = view;
+ renderPassDescriptor.colorAttachments[0].resolveTarget = context
+ .getCurrentTexture()
+ .createView();
+
+ const commandEncoder = device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, uniformBindGroup);
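+ // Draw one full-screen triangle; the vertex shader derives all three
+ // vertices from the vertex index.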
+ passEncoder.draw(3);
+ passEncoder.end();
+ device.queue.submit([commandEncoder.finish()]);
+
+ requestAnimationFrame(frame);
+}
+requestAnimationFrame(frame);
diff --git a/sample/volumeRenderingTexture3D/meta.ts b/sample/volumeRenderingTexture3D/meta.ts
new file mode 100644
index 00000000..0320833c
--- /dev/null
+++ b/sample/volumeRenderingTexture3D/meta.ts
@@ -0,0 +1,17 @@
+export default {
+ name: 'Volume Rendering - Texture 3D',
+ description: `This example shows how to render volumes with WebGPU using a 3D
+texture. It demonstrates simple direct volume rendering for photometric content
+through ray marching in a fragment shader: a full-screen triangle is drawn, the
+vertex shader computes each ray's start position and step vector, and the
+fragment shader accumulates color along the ray. The volume data comes from the
+BrainWeb Simulated Brain Database and is stored gzip-compressed, decoded in the
+browser with a DecompressionStream to save disk space and network traffic.
+
+The original raw data is generated using
+[the BrainWeb Simulated Brain Database](https://brainweb.bic.mni.mcgill.ca/brainweb/)
+before processing in
+[a custom Python script](https://github.com/webgpu/webgpu-samples/tree/main/public/assets/img/volume/t1_icbm_normal_1mm_pn0_rf0.py).`,
+ filename: __DIRNAME__,
+ sources: [{ path: 'main.ts' }, { path: 'volume.wgsl' }],
+};
diff --git a/sample/volumeRenderingTexture3D/volume.wgsl b/sample/volumeRenderingTexture3D/volume.wgsl
new file mode 100644
index 00000000..81a358e8
--- /dev/null
+++ b/sample/volumeRenderingTexture3D/volume.wgsl
@@ -0,0 +1,61 @@
+struct Uniforms {
+ inverseModelViewProjectionMatrix : mat4x4f,
+}
+
+@group(0) @binding(0) var<uniform> uniforms : Uniforms;
+@group(0) @binding(1) var mySampler: sampler;
+@group(0) @binding(2) var myTexture: texture_3d<f32>;
+
+struct VertexOutput {
+ @builtin(position) Position : vec4f,
+ @location(0) near : vec3f,
+ @location(1) step : vec3f,
+}
+
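+// Number of samples taken along each ray through the volume.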
+const NumSteps = 64u;
+
+@vertex
+fn vertex_main(
+ @builtin(vertex_index) VertexIndex : u32
+) -> VertexOutput {
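+ // Clip-space corners of a single triangle that covers the whole screen.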
+ var pos = array(
+ vec2(-1.0, 3.0),
+ vec2(-1.0, -1.0),
+ vec2(3.0, -1.0)
+ );
+ var xy = pos[VertexIndex];
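+ // Unproject the vertex onto the near (z = 0) and far (z = 1) clip planes to
+ // recover the ray through this pixel in model space.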
+ var near = uniforms.inverseModelViewProjectionMatrix * vec4f(xy, 0.0, 1);
+ var far = uniforms.inverseModelViewProjectionMatrix * vec4f(xy, 1, 1);
+ near /= near.w;
+ far /= far.w;
+ return VertexOutput(
+ vec4f(xy, 0.0, 1.0),
+ near.xyz,
+ (far.xyz - near.xyz) / f32(NumSteps)
+ );
+}
+
+@fragment
+fn fragment_main(
+ @location(0) near: vec3f,
+ @location(1) step: vec3f
+) -> @location(0) vec4f {
+ var rayPos = near;
+ var result = 0.0;
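+ // March along the ray, compositing samples front to back until the
+ // accumulated result saturates.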
+ for (var i = 0u; i < NumSteps; i++) {
+ let texCoord = (rayPos.xyz + 1.0) * 0.5;
+ let sample =
+ textureSample(myTexture, mySampler, texCoord).r * 4.0 / f32(NumSteps);
+ let intersects =
+ all(rayPos.xyz < vec3f(1.0)) && all(rayPos.xyz > vec3f(-1.0));
+ result += select(0.0, (1.0 - result) * sample, intersects && result < 1.0);
+ rayPos += step;
+ }
+ return vec4f(vec3f(result), 1.0);
+}
diff --git a/src/samples.ts b/src/samples.ts
index 8d49deb8..378bcf49 100644
--- a/src/samples.ts
+++ b/src/samples.ts
@@ -33,6 +33,7 @@ import textRenderingMsdf from '../sample/textRenderingMsdf/meta';
import texturedCube from '../sample/texturedCube/meta';
import twoCubes from '../sample/twoCubes/meta';
import videoUploading from '../sample/videoUploading/meta';
+import volumeRenderingTexture3D from '../sample/volumeRenderingTexture3D/meta';
import worker from '../sample/worker/meta';
import workloadSimulator from '../sample/workloadSimulator/meta';
@@ -121,6 +122,7 @@ export const pageCategories: PageCategory[] = [
'a-buffer': aBuffer,
skinnedMesh,
textRenderingMsdf,
+ volumeRenderingTexture3D,
},
},