diff --git a/404.html b/404.html
index 4f38c734..ca0cbbb2 100644
--- a/404.html
+++ b/404.html
@@ -1,4 +1,4 @@
-404: This page could not be found

404

This page could not be found.

\ No newline at end of file
+ }

404

This page could not be found.

\ No newline at end of file
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/A-buffer.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/A-buffer.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/A-buffer.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/A-buffer.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/animometer.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/animometer.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/animometer.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/animometer.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/bitonicSort.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/bitonicSort.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/bitonicSort.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/bitonicSort.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cameras.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/cameras.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cameras.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/cameras.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/computeBoids.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/computeBoids.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/computeBoids.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/computeBoids.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cornell.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/cornell.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cornell.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/cornell.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cubemap.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/cubemap.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/cubemap.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/cubemap.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/deferredRendering.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/deferredRendering.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/deferredRendering.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/deferredRendering.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/fractalCube.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/fractalCube.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/fractalCube.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/fractalCube.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/gameOfLife.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/gameOfLife.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/gameOfLife.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/gameOfLife.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/helloTriangle.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/helloTriangle.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/helloTriangle.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/helloTriangle.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/helloTriangleMSAA.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/helloTriangleMSAA.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/helloTriangleMSAA.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/helloTriangleMSAA.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/imageBlur.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/imageBlur.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/imageBlur.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/imageBlur.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/instancedCube.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/instancedCube.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/instancedCube.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/instancedCube.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/normalMap.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/normalMap.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/normalMap.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/normalMap.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/particles.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/particles.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/particles.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/particles.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/renderBundles.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/renderBundles.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/renderBundles.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/renderBundles.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/resizeCanvas.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/resizeCanvas.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/resizeCanvas.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/resizeCanvas.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/reversedZ.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/reversedZ.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/reversedZ.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/reversedZ.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/rotatingCube.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/rotatingCube.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/rotatingCube.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/rotatingCube.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/samplerParameters.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/samplerParameters.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/samplerParameters.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/samplerParameters.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/shadowMapping.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/shadowMapping.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/shadowMapping.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/shadowMapping.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/texturedCube.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/texturedCube.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/texturedCube.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/texturedCube.json
diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/twoCubes.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/twoCubes.json
similarity index 100%
rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/twoCubes.json
rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/twoCubes.json
diff --git
a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/videoUploading.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/videoUploading.json similarity index 100% rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/videoUploading.json rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/videoUploading.json diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/videoUploadingWebCodecs.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/videoUploadingWebCodecs.json similarity index 100% rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/videoUploadingWebCodecs.json rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/videoUploadingWebCodecs.json diff --git a/_next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/worker.json b/_next/data/6y3wAkp06yFozSc0eZanj/samples/worker.json similarity index 100% rename from _next/data/Qe6LUjeO1VKeAF3cPRg5n/samples/worker.json rename to _next/data/6y3wAkp06yFozSc0eZanj/samples/worker.json diff --git a/_next/static/Qe6LUjeO1VKeAF3cPRg5n/_buildManifest.js b/_next/static/6y3wAkp06yFozSc0eZanj/_buildManifest.js similarity index 100% rename from _next/static/Qe6LUjeO1VKeAF3cPRg5n/_buildManifest.js rename to _next/static/6y3wAkp06yFozSc0eZanj/_buildManifest.js diff --git a/_next/static/Qe6LUjeO1VKeAF3cPRg5n/_ssgManifest.js b/_next/static/6y3wAkp06yFozSc0eZanj/_ssgManifest.js similarity index 100% rename from _next/static/Qe6LUjeO1VKeAF3cPRg5n/_ssgManifest.js rename to _next/static/6y3wAkp06yFozSc0eZanj/_ssgManifest.js diff --git a/_next/static/chunks/874.594d923f25388139.js b/_next/static/chunks/874.594d923f25388139.js new file mode 100644 index 00000000..7a6696e1 --- /dev/null +++ b/_next/static/chunks/874.594d923f25388139.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[874],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var a=t(5893),r=t(9008),i=t.n(r),o=t(1163),s=t(7294),u=t(9147),c=t.n(u);t(7319);let l=e=>{let n=(0,s.useRef)(null),r=(0,s.useRef)(null),u=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:r}=e;return{name:n,...function(e){let n;let r=null;{r=document.createElement("div");let i=t(4631);n=i(r,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,a.jsx)("div",{...t,children:(0,a.jsx)("div",{ref(t){r&&t&&(t.appendChild(r),n.setOption("value",e))}})})}}}(r)}}),e.sources),l=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),a=new n.GUI({autoPlace:!1});return a.domElement.style.position="relative",a.domElement.style.zIndex="1000",a}},[]),m=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),h=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,v]=(0,s.useState)(null),[b,y]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?y(h[1]):y(u[0].name),d&&l.current)for(l.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();f&&m.current&&(f.dom.style.position="absolute",f.showPanel(1),m.current.appendChild(f.dom));let t={active:!0},a=()=>{t.active=!1};try{let r=n.current;if(!r)throw Error("The canvas is not available");let i=e.init({canvas:r,pageState:t,gui:d,stats:f});i instanceof Promise&&i.catch(e=>{console.error(e),v(e)})}catch(o){console.error(o),v(o)}return a},[]),(0,a.jsxs)("main",{children:[(0,a.jsxs)(i(),{children:[(0,a.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,a.jsx)("title",{children:"".concat(e.name," - 
WebGPU Samples")}),(0,a.jsx)("meta",{name:"description",content:e.description}),(0,a.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("h1",{children:e.name}),(0,a.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,a.jsx)("p",{children:e.description}),g?(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,a.jsx)("p",{children:"".concat(g)})]}):null]}),(0,a.jsxs)("div",{className:c().canvasContainer,children:[(0,a.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,a.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,a.jsx)("canvas",{ref:n})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("nav",{className:c().sourceFileNav,ref:r,children:(0,a.jsx)("div",{className:c().sourceFileScrollContainer,onScroll(e){let n=e.currentTarget,t=n.scrollWidth-n.clientWidth-n.scrollLeft;n.scrollLeft>25?r.current.setAttribute("data-left","true"):r.current.setAttribute("data-left","false"),t>25?r.current.setAttribute("data-right","true"):r.current.setAttribute("data-right","false")},children:(0,a.jsx)("ul",{children:u.map((e,n)=>(0,a.jsx)("li",{children:(0,a.jsx)("a",{href:"#".concat(e.name),"data-active":b==e.name,onClick(){y(e.name)},children:e.name})},n))})})}),u.map((e,n)=>(0,a.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":b==e.name},n))]})]})},d=e=>(0,a.jsx)(l,{...e});function m(e,n){if(!e)throw Error(n)}},6874:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return S}});var a,r,i=t(5671),o="// A storage buffer holding an array of atomic.\n// The array elements are a sequence of red, green, blue components, for each\n// lightmap texel, for each quad surface.\n@group(1) @binding(0)\nvar accumulation : array>;\n\n// The output lightmap texture.\n@group(1) @binding(1)\nvar lightmap : texture_storage_2d_array;\n\n// Uniform data used by the accumulation_to_lightmap entry point\nstruct Uniforms {\n // Scalar for converting accumulation values to output lightmap values\n accumulation_to_lightmap_scale : f32,\n // Accumulation buffer rescaling value\n accumulation_buffer_scale : f32,\n // The width of the light\n light_width : f32,\n // The height of the light\n light_height : f32,\n // The center of the light\n light_center : vec3f,\n}\n\n// accumulation_to_lightmap uniforms binding point\n@group(1) @binding(2) var uniforms : Uniforms;\n\n// Number of photons emitted per workgroup\noverride PhotonsPerWorkgroup : u32;\n\n// Maximum value that can be added to the accumulation buffer from a single photon\noverride PhotonEnergy : f32;\n\n// Number of bounces of each photon\nconst PhotonBounces = 4;\n\n// Amount of light absorbed with each photon bounce (0: 0%, 1: 100%)\nconst LightAbsorbtion = 0.5;\n\n// Radiosity compute shader.\n// Each invocation creates a photon from the light source, and accumulates\n// bounce lighting into the 'accumulation' buffer.\n@compute @workgroup_size(PhotonsPerWorkgroup)\nfn radiosity(@builtin(global_invocation_id) invocation_id : vec3u) {\n init_rand(invocation_id);\n photon();\n}\n\n// Spawns a photon at the light source, performs ray tracing in the scene,\n// accumulating light values into 'accumulation' for each quad surface hit.\nfn photon() {\n // Create a random ray from the light.\n var ray = new_light_ray();\n // Give the photon an initial energy value.\n var color = PhotonEnergy * vec3f(1, 
0.8, 0.6);\n\n // Start bouncing.\n for (var i = 0; i < (PhotonBounces+1); i++) {\n // Find the closest hit of the ray with the scene's quads.\n let hit = raytrace(ray);\n let quad = quads[hit.quad];\n\n // Bounce the ray.\n ray.start = hit.pos + quad.plane.xyz * 1e-5;\n ray.dir = normalize(reflect(ray.dir, quad.plane.xyz) + rand_unit_sphere() * 0.75);\n\n // Photon color is multiplied by the quad's color.\n color *= quad.color;\n\n // Accumulate the aborbed light into the 'accumulation' buffer.\n accumulate(hit.uv, hit.quad, color * LightAbsorbtion);\n\n // What wasn't absorbed is reflected.\n color *= 1 - LightAbsorbtion;\n }\n}\n\n// Performs an atomicAdd() with 'color' into the 'accumulation' buffer at 'uv'\n// and 'quad'.\nfn accumulate(uv : vec2f, quad : u32, color : vec3f) {\n let dims = textureDimensions(lightmap);\n let base_idx = accumulation_base_index(vec2u(uv * vec2f(dims)), quad);\n atomicAdd(&accumulation[base_idx + 0], u32(color.r + 0.5));\n atomicAdd(&accumulation[base_idx + 1], u32(color.g + 0.5));\n atomicAdd(&accumulation[base_idx + 2], u32(color.b + 0.5));\n}\n\n// Returns the base element index for the texel at 'coord' for 'quad'\nfn accumulation_base_index(coord : vec2u, quad : u32) -> u32 {\n let dims = textureDimensions(lightmap);\n let c = min(vec2u(dims) - 1, coord);\n return 3 * (c.x + dims.x * c.y + dims.x * dims.y * quad);\n}\n\n// Returns a new Ray at a random point on the light, in a random downwards\n// direction.\nfn new_light_ray() -> Ray {\n let center = uniforms.light_center;\n let pos = center + vec3f(uniforms.light_width * (rand() - 0.5),\n 0,\n uniforms.light_height * (rand() - 0.5));\n var dir = rand_cosine_weighted_hemisphere().xzy;\n dir.y = -dir.y;\n return Ray(pos, dir);\n}\n\noverride AccumulationToLightmapWorkgroupSizeX : u32;\noverride AccumulationToLightmapWorkgroupSizeY : u32;\n\n// Compute shader used to copy the atomic data in 'accumulation' to\n// 'lightmap'. 
'accumulation' might also be scaled to reduce integer overflow.\n@compute @workgroup_size(AccumulationToLightmapWorkgroupSizeX, AccumulationToLightmapWorkgroupSizeY)\nfn accumulation_to_lightmap(@builtin(global_invocation_id) invocation_id : vec3u,\n @builtin(workgroup_id) workgroup_id : vec3u) {\n let dims = textureDimensions(lightmap);\n let quad = workgroup_id.z; // The workgroup 'z' value holds the quad index.\n let coord = invocation_id.xy;\n if (all(coord < dims)) {\n // Load the color value out of 'accumulation'\n let base_idx = accumulation_base_index(coord, quad);\n let color = vec3(f32(atomicLoad(&accumulation[base_idx + 0])),\n f32(atomicLoad(&accumulation[base_idx + 1])),\n f32(atomicLoad(&accumulation[base_idx + 2])));\n\n // Multiply the color by 'uniforms.accumulation_to_lightmap_scale' and write it to\n // the lightmap.\n textureStore(lightmap, coord, quad, vec4(color * uniforms.accumulation_to_lightmap_scale, 1));\n\n // If the 'accumulation' buffer is nearing saturation, then\n // 'uniforms.accumulation_buffer_scale' will be less than 1, scaling the values\n // to something less likely to overflow the u32.\n if (uniforms.accumulation_buffer_scale != 1.0) {\n let scaled = color * uniforms.accumulation_buffer_scale + 0.5;\n atomicStore(&accumulation[base_idx + 0], u32(scaled.r));\n atomicStore(&accumulation[base_idx + 1], u32(scaled.g));\n atomicStore(&accumulation[base_idx + 2], u32(scaled.b));\n }\n }\n}\n",s="// The lightmap data\n@group(1) @binding(0) var lightmap : texture_2d_array;\n\n// The sampler used to sample the lightmap\n@group(1) @binding(1) var smpl : sampler;\n\n// Vertex shader input data\nstruct VertexIn {\n @location(0) position : vec4f,\n @location(1) uv : vec3f,\n @location(2) emissive : vec3f,\n}\n\n// Vertex shader output data\nstruct VertexOut {\n @builtin(position) position : vec4f,\n @location(0) uv : vec2f,\n @location(1) emissive : vec3f,\n @interpolate(flat)\n @location(2) quad : u32,\n}\n\n@vertex\nfn vs_main(input : VertexIn) -> VertexOut {\n var output : VertexOut;\n output.position = common_uniforms.mvp * input.position;\n output.uv = input.uv.xy;\n output.quad = u32(input.uv.z + 0.5);\n output.emissive = input.emissive;\n return output;\n}\n\n@fragment\nfn fs_main(vertex_out : VertexOut) -> @location(0) vec4f {\n return textureSample(lightmap, smpl, vertex_out.uv, vertex_out.quad) + vec4f(vertex_out.emissive, 1);\n}\n",u="// The lightmap data\n@group(1) @binding(0) var lightmap : texture_2d_array;\n\n// The sampler used to sample the lightmap\n@group(1) @binding(1) var smpl : sampler;\n\n// The output framebuffer\n@group(1) @binding(2) var framebuffer : texture_storage_2d;\n\noverride WorkgroupSizeX : u32;\noverride WorkgroupSizeY : u32;\n\nconst NumReflectionRays = 5;\n\n@compute @workgroup_size(WorkgroupSizeX, WorkgroupSizeY)\nfn main(@builtin(global_invocation_id) invocation_id : vec3u) {\n if (all(invocation_id.xy < textureDimensions(framebuffer))) {\n init_rand(invocation_id);\n\n // Calculate the fragment's NDC coordinates for the intersection of the near\n // clip plane and far clip plane\n let uv = vec2f(invocation_id.xy) / vec2f(textureDimensions(framebuffer).xy);\n let ndcXY = (uv - 0.5) * vec2(2, -2);\n\n // Transform the coordinates back into world space\n var near = common_uniforms.inv_mvp * vec4f(ndcXY, 0.0, 1);\n var far = common_uniforms.inv_mvp * vec4f(ndcXY, 1, 1);\n near /= near.w;\n far /= far.w;\n\n // Create a ray that starts at the near clip plane, heading in the fragment's\n // z-direction, and raytrace to find the 
nearest quad that the ray intersects.\n let ray = Ray(near.xyz, normalize(far.xyz - near.xyz));\n let hit = raytrace(ray);\n\n let hit_color = sample_hit(hit);\n var normal = quads[hit.quad].plane.xyz;\n\n // Fire a few rays off the surface to collect some reflections\n let bounce = reflect(ray.dir, normal);\n var reflection : vec3f;\n for (var i = 0; i < NumReflectionRays; i++) {\n let reflection_dir = normalize(bounce + rand_unit_sphere()*0.1);\n let reflection_ray = Ray(hit.pos + bounce * 1e-5, reflection_dir);\n let reflection_hit = raytrace(reflection_ray);\n reflection += sample_hit(reflection_hit);\n }\n let color = mix(reflection / NumReflectionRays, hit_color, 0.95);\n\n textureStore(framebuffer, invocation_id.xy, vec4(color, 1));\n }\n}\n\n\n// Returns the sampled hit quad's lightmap at 'hit.uv', and adds the quad's\n// emissive value.\nfn sample_hit(hit : HitInfo) -> vec3f {\n let quad = quads[hit.quad];\n // Sample the quad's lightmap, and add emissive.\n return textureSampleLevel(lightmap, smpl, hit.uv, hit.quad, 0).rgb +\n quad.emissive * quad.color;\n}\n",c="// The linear-light input framebuffer\n@group(0) @binding(0) var input : texture_2d;\n\n// The tonemapped, gamma-corrected output framebuffer\n@group(0) @binding(1) var output : texture_storage_2d<{OUTPUT_FORMAT}, write>;\n\nconst TonemapExposure = 0.5;\n\nconst Gamma = 2.2;\n\noverride WorkgroupSizeX : u32;\noverride WorkgroupSizeY : u32;\n\n@compute @workgroup_size(WorkgroupSizeX, WorkgroupSizeY)\nfn main(@builtin(global_invocation_id) invocation_id : vec3u) {\n let color = textureLoad(input, invocation_id.xy, 0).rgb;\n let tonemapped = reinhard_tonemap(color);\n textureStore(output, invocation_id.xy, vec4f(tonemapped, 1));\n}\n\nfn reinhard_tonemap(linearColor: vec3f) -> vec3f {\n let color = linearColor * TonemapExposure;\n let mapped = color / (1+color);\n return pow(mapped, vec3f(1 / Gamma));\n}\n",l="const pi = 3.14159265359;\n\n// Quad describes 2D rectangle on a plane\nstruct Quad {\n // The surface plane\n plane : vec4f,\n // A plane with a normal in the 'u' direction, intersecting the origin, at\n // right-angles to the surface plane.\n // The dot product of 'right' with a 'vec4(pos, 1)' will range between [-1..1]\n // if the projected point is within the quad.\n right : vec4f,\n // A plane with a normal in the 'v' direction, intersecting the origin, at\n // right-angles to the surface plane.\n // The dot product of 'up' with a 'vec4(pos, 1)' will range between [-1..1]\n // if the projected point is within the quad.\n up : vec4f,\n // The diffuse color of the quad\n color : vec3f,\n // Emissive value. 
0=no emissive, 1=full emissive.\n emissive : f32,\n};\n\n// Ray is a start point and direction.\nstruct Ray {\n start : vec3f,\n dir : vec3f,\n}\n\n// Value for HitInfo.quad if no intersection occured.\nconst kNoHit = 0xffffffff;\n\n// HitInfo describes the hit location of a ray-quad intersection\nstruct HitInfo {\n // Distance along the ray to the intersection\n dist : f32,\n // The quad index that was hit\n quad : u32,\n // The position of the intersection\n pos : vec3f,\n // The UVs of the quad at the point of intersection\n uv : vec2f,\n}\n\n// CommonUniforms uniform buffer data\nstruct CommonUniforms {\n // Model View Projection matrix\n mvp : mat4x4f,\n // Inverse of mvp\n inv_mvp : mat4x4f,\n // Random seed for the workgroup\n seed : vec3u,\n}\n\n// The common uniform buffer binding.\n@group(0) @binding(0) var common_uniforms : CommonUniforms;\n\n// The quad buffer binding.\n@group(0) @binding(1) var quads : array;\n\n// intersect_ray_quad will check to see if the ray 'r' intersects the quad 'q'.\n// If an intersection occurs, and the intersection is closer than 'closest' then\n// the intersection information is returned, otherwise 'closest' is returned.\nfn intersect_ray_quad(r : Ray, quad : u32, closest : HitInfo) -> HitInfo {\n let q = quads[quad];\n let plane_dist = dot(q.plane, vec4(r.start, 1));\n let ray_dist = plane_dist / -dot(q.plane.xyz, r.dir);\n let pos = r.start + r.dir * ray_dist;\n let uv = vec2(dot(vec4f(pos, 1), q.right),\n dot(vec4f(pos, 1), q.up)) * 0.5 + 0.5;\n let hit = plane_dist > 0 &&\n ray_dist > 0 &&\n ray_dist < closest.dist &&\n all((uv > vec2f()) & (uv < vec2f(1)));\n return HitInfo(\n select(closest.dist, ray_dist, hit),\n select(closest.quad, quad, hit),\n select(closest.pos, pos, hit),\n select(closest.uv, uv, hit),\n );\n}\n\n// raytrace finds the closest intersecting quad for the given ray\nfn raytrace(ray : Ray) -> HitInfo {\n var hit = HitInfo();\n hit.dist = 1e20;\n hit.quad = kNoHit;\n for (var quad = 0u; quad < arrayLength(&quads); quad++) {\n hit = intersect_ray_quad(ray, quad, hit);\n }\n return hit;\n}\n\n// A psuedo random number. 
Initialized with init_rand(), updated with rand().\nvar rnd : vec3u;\n\n// Initializes the random number generator.\nfn init_rand(invocation_id : vec3u) {\n const A = vec3(1741651 * 1009,\n 140893 * 1609 * 13,\n 6521 * 983 * 7 * 2);\n rnd = (invocation_id * A) ^ common_uniforms.seed;\n}\n\n// Returns a random number between 0 and 1.\nfn rand() -> f32 {\n const C = vec3(60493 * 9377,\n 11279 * 2539 * 23,\n 7919 * 631 * 5 * 3);\n\n rnd = (rnd * C) ^ (rnd.yzx >> vec3(4u));\n return f32(rnd.x ^ rnd.y) / f32(0xffffffff);\n}\n\n// Returns a random point within a unit sphere centered at (0,0,0).\nfn rand_unit_sphere() -> vec3f {\n var u = rand();\n var v = rand();\n var theta = u * 2.0 * pi;\n var phi = acos(2.0 * v - 1.0);\n var r = pow(rand(), 1.0/3.0);\n var sin_theta = sin(theta);\n var cos_theta = cos(theta);\n var sin_phi = sin(phi);\n var cos_phi = cos(phi);\n var x = r * sin_phi * sin_theta;\n var y = r * sin_phi * cos_theta;\n var z = r * cos_phi;\n return vec3f(x, y, z);\n}\n\nfn rand_concentric_disk() -> vec2f {\n let u = vec2f(rand(), rand());\n let uOffset = 2.f * u - vec2f(1, 1);\n\n if (uOffset.x == 0 && uOffset.y == 0){\n return vec2f(0, 0);\n }\n\n var theta = 0.0;\n var r = 0.0;\n if (abs(uOffset.x) > abs(uOffset.y)) {\n r = uOffset.x;\n theta = (pi / 4) * (uOffset.y / uOffset.x);\n } else {\n r = uOffset.y;\n theta = (pi / 2) - (pi / 4) * (uOffset.x / uOffset.y);\n }\n return r * vec2f(cos(theta), sin(theta));\n}\n\nfn rand_cosine_weighted_hemisphere() -> vec3f {\n let d = rand_concentric_disk();\n let z = sqrt(max(0.0, 1.0 - d.x * d.x - d.y * d.y));\n return vec3f(d.x, d.y, z);\n}\n",d=t(6416);function m(e){let n=1/d.R3.lenSq(e);return d.R3.mul(d.R3.fromValues(n,n,n),e)}function f(e){let n=d.R3.fromValues(Math.cos(e.rotation)*(e.width/2),0,Math.sin(e.rotation)*(e.depth/2)),t=d.R3.fromValues(0,e.height/2,0),a=d.R3.fromValues(Math.sin(e.rotation)*(e.width/2),0,-Math.cos(e.rotation)*(e.depth/2)),i=e.color instanceof Array?e.color:Array(6).fill(e.color),o=n=>"concave"===e.type?n:d.R3.negate(n);return[{center:d.R3.add(e.center,n),right:o(d.R3.negate(a)),up:t,color:i[r.PositiveX]},{center:d.R3.add(e.center,t),right:o(n),up:d.R3.negate(a),color:i[r.PositiveY]},{center:d.R3.add(e.center,a),right:o(n),up:t,color:i[r.PositiveZ]},{center:d.R3.sub(e.center,n),right:o(a),up:t,color:i[r.NegativeX]},{center:d.R3.sub(e.center,t),right:o(n),up:a,color:i[r.NegativeY]},{center:d.R3.sub(e.center,a),right:o(d.R3.negate(n)),up:t,color:i[r.NegativeZ]}]}(a=r||(r={}))[a.PositiveX=0]="PositiveX",a[a.PositiveY=1]="PositiveY",a[a.PositiveZ=2]="PositiveZ",a[a.NegativeX=3]="NegativeX",a[a.NegativeY=4]="NegativeY",a[a.NegativeZ=5]="NegativeZ";let p={center:d.R3.fromValues(0,9.95,0),right:d.R3.fromValues(1,0,0),up:d.R3.fromValues(0,0,1),color:d.R3.fromValues(5,5,5),emissive:1};class h{constructor(e){this.quads=[...f({center:d.R3.fromValues(0,5,0),width:10,height:10,depth:10,rotation:0,color:[d.R3.fromValues(0,.5,0),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,0,0),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,.5,.5)],type:"concave"}),...f({center:d.R3.fromValues(1.5,1.5,1),width:3,height:3,depth:3,rotation:.3,color:d.R3.fromValues(.8,.8,.8),type:"convex"}),...f({center:d.R3.fromValues(-2,3,-2),width:3,height:6,depth:3,rotation:-.4,color:d.R3.fromValues(.8,.8,.8),type:"convex"}),p],this.lightCenter=p.center,this.lightWidth=2*d.R3.len(p.right),this.lightHeight=2*d.R3.len(p.up);let n=e.createBuffer({size:64*this.quads.length,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),t=new 
Float32Array(n.getMappedRange()),a=new Float32Array(40*this.quads.length),r=new Uint32Array(9*this.quads.length),i=0,o=0,s=0,u=0,c=0;for(let l=0;l x\n const x = vec3.fromValues(\n Math.cos(params.rotation) * (params.width / 2),\n 0,\n Math.sin(params.rotation) * (params.depth / 2)\n );\n const y = vec3.fromValues(0, params.height / 2, 0);\n const z = vec3.fromValues(\n Math.sin(params.rotation) * (params.width / 2),\n 0,\n -Math.cos(params.rotation) * (params.depth / 2)\n );\n const colors =\n params.color instanceof Array\n ? params.color\n : new Array(6).fill(params.color);\n const sign = (v: Vec3) => {\n return params.type === 'concave' ? v : vec3.negate(v);\n };\n return [\n {\n // PositiveX\n center: vec3.add(params.center, x),\n right: sign(vec3.negate(z)),\n up: y,\n color: colors[CubeFace.PositiveX],\n },\n {\n // PositiveY\n center: vec3.add(params.center, y),\n right: sign(x),\n up: vec3.negate(z),\n color: colors[CubeFace.PositiveY],\n },\n {\n // PositiveZ\n center: vec3.add(params.center, z),\n right: sign(x),\n up: y,\n color: colors[CubeFace.PositiveZ],\n },\n {\n // NegativeX\n center: vec3.sub(params.center, x),\n right: sign(z),\n up: y,\n color: colors[CubeFace.NegativeX],\n },\n {\n // NegativeY\n center: vec3.sub(params.center, y),\n right: sign(x),\n up: z,\n color: colors[CubeFace.NegativeY],\n },\n {\n // NegativeZ\n center: vec3.sub(params.center, z),\n right: sign(vec3.negate(x)),\n up: y,\n color: colors[CubeFace.NegativeZ],\n },\n ];\n}\n\nconst light: Quad = {\n center: vec3.fromValues(0, 9.95, 0),\n right: vec3.fromValues(1, 0, 0),\n up: vec3.fromValues(0, 0, 1),\n color: vec3.fromValues(5.0, 5.0, 5.0),\n emissive: 1.0,\n};\n\n/**\n * Scene holds the cornell-box scene information.\n */\nexport default class Scene {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n readonly vertexCount: number;\n readonly indexCount: number;\n readonly vertices: GPUBuffer;\n readonly indices: GPUBuffer;\n readonly vertexBufferLayout: GPUVertexBufferLayout[];\n readonly quadBuffer: GPUBuffer;\n readonly quads = [\n ...box({\n center: vec3.fromValues(0, 5, 0),\n width: 10,\n height: 10,\n depth: 10,\n rotation: 0,\n color: [\n vec3.fromValues(0.0, 0.5, 0.0), // PositiveX\n vec3.fromValues(0.5, 0.5, 0.5), // PositiveY\n vec3.fromValues(0.5, 0.5, 0.5), // PositiveZ\n vec3.fromValues(0.5, 0.0, 0.0), // NegativeX\n vec3.fromValues(0.5, 0.5, 0.5), // NegativeY\n vec3.fromValues(0.5, 0.5, 0.5), // NegativeZ\n ],\n type: 'concave',\n }),\n ...box({\n center: vec3.fromValues(1.5, 1.5, 1),\n width: 3,\n height: 3,\n depth: 3,\n rotation: 0.3,\n color: vec3.fromValues(0.8, 0.8, 0.8),\n type: 'convex',\n }),\n ...box({\n center: vec3.fromValues(-2, 3, -2),\n width: 3,\n height: 6,\n depth: 3,\n rotation: -0.4,\n color: vec3.fromValues(0.8, 0.8, 0.8),\n type: 'convex',\n }),\n light,\n ];\n readonly lightCenter = light.center;\n readonly lightWidth = vec3.len(light.right) * 2;\n readonly lightHeight = vec3.len(light.up) * 2;\n\n constructor(device: GPUDevice) {\n const quadStride = 16 * 4;\n const quadBuffer = device.createBuffer({\n size: quadStride * this.quads.length,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n const quadData = new Float32Array(quadBuffer.getMappedRange());\n const vertexStride = 4 * 10;\n const vertexData = new Float32Array(this.quads.length * vertexStride);\n const indexData = new Uint32Array(this.quads.length * 9); // TODO: 6?\n let vertexCount = 0;\n let indexCount = 0;\n let 
quadDataOffset = 0;\n let vertexDataOffset = 0;\n let indexDataOffset = 0;\n for (let quadIdx = 0; quadIdx < this.quads.length; quadIdx++) {\n const quad = this.quads[quadIdx];\n const normal = vec3.normalize(vec3.cross(quad.right, quad.up));\n quadData[quadDataOffset++] = normal[0];\n quadData[quadDataOffset++] = normal[1];\n quadData[quadDataOffset++] = normal[2];\n quadData[quadDataOffset++] = -vec3.dot(normal, quad.center);\n\n const invRight = reciprocal(quad.right);\n quadData[quadDataOffset++] = invRight[0];\n quadData[quadDataOffset++] = invRight[1];\n quadData[quadDataOffset++] = invRight[2];\n quadData[quadDataOffset++] = -vec3.dot(invRight, quad.center);\n\n const invUp = reciprocal(quad.up);\n quadData[quadDataOffset++] = invUp[0];\n quadData[quadDataOffset++] = invUp[1];\n quadData[quadDataOffset++] = invUp[2];\n quadData[quadDataOffset++] = -vec3.dot(invUp, quad.center);\n\n quadData[quadDataOffset++] = quad.color[0];\n quadData[quadDataOffset++] = quad.color[1];\n quadData[quadDataOffset++] = quad.color[2];\n quadData[quadDataOffset++] = quad.emissive ?? 0;\n\n // a ----- b\n // | |\n // | m |\n // | |\n // c ----- d\n const a = vec3.add(vec3.sub(quad.center, quad.right), quad.up);\n const b = vec3.add(vec3.add(quad.center, quad.right), quad.up);\n const c = vec3.sub(vec3.sub(quad.center, quad.right), quad.up);\n const d = vec3.sub(vec3.add(quad.center, quad.right), quad.up);\n\n vertexData[vertexDataOffset++] = a[0];\n vertexData[vertexDataOffset++] = a[1];\n vertexData[vertexDataOffset++] = a[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 0; // uv.x\n vertexData[vertexDataOffset++] = 1; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = b[0];\n vertexData[vertexDataOffset++] = b[1];\n vertexData[vertexDataOffset++] = b[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 1; // uv.x\n vertexData[vertexDataOffset++] = 1; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = c[0];\n vertexData[vertexDataOffset++] = c[1];\n vertexData[vertexDataOffset++] = c[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 0; // uv.x\n vertexData[vertexDataOffset++] = 0; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = d[0];\n vertexData[vertexDataOffset++] = d[1];\n vertexData[vertexDataOffset++] = d[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 1; // uv.x\n vertexData[vertexDataOffset++] = 0; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 
0);\n\n indexData[indexDataOffset++] = vertexCount + 0; // a\n indexData[indexDataOffset++] = vertexCount + 2; // c\n indexData[indexDataOffset++] = vertexCount + 1; // b\n indexData[indexDataOffset++] = vertexCount + 1; // b\n indexData[indexDataOffset++] = vertexCount + 2; // c\n indexData[indexDataOffset++] = vertexCount + 3; // d\n indexCount += 6;\n vertexCount += 4;\n }\n\n quadBuffer.unmap();\n\n const vertices = device.createBuffer({\n size: vertexData.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(vertices.getMappedRange()).set(vertexData);\n vertices.unmap();\n\n const indices = device.createBuffer({\n size: indexData.byteLength,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n new Uint16Array(indices.getMappedRange()).set(indexData);\n indices.unmap();\n\n const vertexBufferLayout: GPUVertexBufferLayout[] = [\n {\n arrayStride: vertexStride,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0 * 4,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: 4 * 4,\n format: 'float32x3',\n },\n {\n // color\n shaderLocation: 2,\n offset: 7 * 4,\n format: 'float32x3',\n },\n ],\n },\n ];\n\n this.vertexCount = vertexCount;\n this.indexCount = indexCount;\n this.vertices = vertices;\n this.indices = indices;\n this.vertexBufferLayout = vertexBufferLayout;\n this.quadBuffer = quadBuffer;\n }\n}\n"};class g{update(e){let n=d._E.perspective(2*Math.PI/8,e.aspect,.5,100),t=e.rotateCamera?this.frame/1e3:0,a=d._E.lookAt(d.R3.fromValues(15*Math.sin(t),5,15*Math.cos(t)),d.R3.fromValues(0,5,0),d.R3.fromValues(0,1,0)),r=d._E.multiply(n,a),i=d._E.invert(r),o=new Float32Array(this.uniformBuffer.size/4),s=new Uint32Array(o.buffer);for(let u=0;u<16;u++)o[u]=r[u];for(let c=0;c<16;c++)o[c+16]=i[c];s[32]=4294967295*Math.random(),s[33]=4294967295*Math.random(),s[34]=4294967295*Math.random(),this.device.queue.writeBuffer(this.uniformBuffer,0,o.buffer,o.byteOffset,o.byteLength),this.frame++}constructor(e,n){this.wgsl=l,this.frame=0,this.device=e,this.uniformBuffer=e.createBuffer({label:"Common.uniformBuffer",size:144,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});let t=e.createBindGroupLayout({label:"Common.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.VERTEX|GPUShaderStage.COMPUTE,buffer:{type:"uniform"}},{binding:1,visibility:GPUShaderStage.COMPUTE,buffer:{type:"read-only-storage"}}]}),a=e.createBindGroup({label:"Common.bindGroup",layout:t,entries:[{binding:0,resource:{buffer:this.uniformBuffer,offset:0,size:this.uniformBuffer.size}},{binding:1,resource:{buffer:n,offset:0,size:n.size}}]});this.uniforms={bindGroupLayout:t,bindGroup:a}}}g.sourceInfo={name:"src/sample/cornell/common.ts".substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport commonWGSL from './common.wgsl';\n\n/**\n * Common holds the shared WGSL between the shaders, including the common uniform buffer.\n */\nexport default class Common {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n /** The WGSL of the common shader */\n readonly wgsl = commonWGSL;\n /** The common uniform buffer bind group and layout */\n readonly uniforms: {\n bindGroupLayout: GPUBindGroupLayout;\n bindGroup: GPUBindGroup;\n };\n\n private readonly device: GPUDevice;\n private readonly uniformBuffer: GPUBuffer;\n\n private frame = 0;\n\n constructor(device: GPUDevice, quads: GPUBuffer) {\n this.device = device;\n this.uniformBuffer = device.createBuffer({\n label: 
'Common.uniformBuffer',\n size:\n 0 + //\n 4 * 16 + // mvp\n 4 * 16 + // inv_mvp\n 4 * 4, // seed\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Common.bindGroupLayout',\n entries: [\n {\n // common_uniforms\n binding: 0,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.COMPUTE,\n buffer: { type: 'uniform' },\n },\n {\n // quads\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'read-only-storage' },\n },\n ],\n });\n\n const bindGroup = device.createBindGroup({\n label: 'Common.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // common_uniforms\n binding: 0,\n resource: {\n buffer: this.uniformBuffer,\n offset: 0,\n size: this.uniformBuffer.size,\n },\n },\n {\n // quads\n binding: 1,\n resource: {\n buffer: quads,\n offset: 0,\n size: quads.size,\n },\n },\n ],\n });\n\n this.uniforms = { bindGroupLayout, bindGroup };\n }\n\n /** Updates the uniform buffer data */\n update(params: { rotateCamera: boolean; aspect: number }) {\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 8,\n params.aspect,\n 0.5,\n 100\n );\n\n const viewRotation = params.rotateCamera ? this.frame / 1000 : 0;\n\n const viewMatrix = mat4.lookAt(\n vec3.fromValues(\n Math.sin(viewRotation) * 15,\n 5,\n Math.cos(viewRotation) * 15\n ),\n vec3.fromValues(0, 5, 0),\n vec3.fromValues(0, 1, 0)\n );\n const mvp = mat4.multiply(projectionMatrix, viewMatrix);\n const invMVP = mat4.invert(mvp);\n\n const uniformDataF32 = new Float32Array(this.uniformBuffer.size / 4);\n const uniformDataU32 = new Uint32Array(uniformDataF32.buffer);\n for (let i = 0; i < 16; i++) {\n uniformDataF32[i] = mvp[i];\n }\n for (let i = 0; i < 16; i++) {\n uniformDataF32[i + 16] = invMVP[i];\n }\n uniformDataU32[32] = 0xffffffff * Math.random();\n uniformDataU32[33] = 0xffffffff * Math.random();\n uniformDataU32[34] = 0xffffffff * Math.random();\n\n this.device.queue.writeBuffer(\n this.uniformBuffer,\n 0,\n uniformDataF32.buffer,\n uniformDataF32.byteOffset,\n uniformDataF32.byteLength\n );\n\n this.frame++;\n }\n}\n"};class v{run(e){this.accumulationMean+=this.kPhotonsPerFrame*this.kPhotonEnergy/this.kTotalLightmapTexels;let n=1/this.accumulationMean,t=this.accumulationMean>2*this.kAccumulationMeanMax?.5:1;this.accumulationMean*=t;let a=new Float32Array(this.uniformBuffer.size/4);a[0]=n,a[1]=t,a[2]=this.scene.lightWidth,a[3]=this.scene.lightHeight,a[4]=this.scene.lightCenter[0],a[5]=this.scene.lightCenter[1],a[6]=this.scene.lightCenter[2],this.device.queue.writeBuffer(this.uniformBuffer,0,a.buffer,a.byteOffset,a.byteLength);let 
r=e.beginComputePass();r.setBindGroup(0,this.common.uniforms.bindGroup),r.setBindGroup(1,this.bindGroup),r.setPipeline(this.radiosityPipeline),r.dispatchWorkgroups(this.kWorkgroupsPerFrame),r.setPipeline(this.accumulationToLightmapPipeline),r.dispatchWorkgroups(Math.ceil(v.lightmapWidth/this.kAccumulationToLightmapWorkgroupSizeX),Math.ceil(v.lightmapHeight/this.kAccumulationToLightmapWorkgroupSizeY),this.lightmap.depthOrArrayLayers),r.end()}constructor(e,n,t){this.kPhotonsPerWorkgroup=256,this.kWorkgroupsPerFrame=1024,this.kPhotonsPerFrame=this.kPhotonsPerWorkgroup*this.kWorkgroupsPerFrame,this.kPhotonEnergy=1e5,this.kAccumulationToLightmapWorkgroupSizeX=16,this.kAccumulationToLightmapWorkgroupSizeY=16,this.accumulationMean=0,this.kAccumulationMeanMax=268435456,this.device=e,this.common=n,this.scene=t,this.lightmap=e.createTexture({label:"Radiosity.lightmap",size:{width:v.lightmapWidth,height:v.lightmapHeight,depthOrArrayLayers:t.quads.length},format:v.lightmapFormat,usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.STORAGE_BINDING}),this.accumulationBuffer=e.createBuffer({label:"Radiosity.accumulationBuffer",size:v.lightmapWidth*v.lightmapHeight*t.quads.length*16,usage:GPUBufferUsage.STORAGE}),this.kTotalLightmapTexels=v.lightmapWidth*v.lightmapHeight*t.quads.length,this.uniformBuffer=e.createBuffer({label:"Radiosity.uniformBuffer",size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});let a=e.createBindGroupLayout({label:"Radiosity.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.COMPUTE,buffer:{type:"storage"}},{binding:1,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:v.lightmapFormat,viewDimension:"2d-array"}},{binding:2,visibility:GPUShaderStage.COMPUTE,buffer:{type:"uniform"}}]});this.bindGroup=e.createBindGroup({label:"Radiosity.bindGroup",layout:a,entries:[{binding:0,resource:{buffer:this.accumulationBuffer,size:this.accumulationBuffer.size}},{binding:1,resource:this.lightmap.createView()},{binding:2,resource:{buffer:this.uniformBuffer,size:this.uniformBuffer.size}}]});let r=e.createShaderModule({code:o+n.wgsl}),i=e.createPipelineLayout({label:"Radiosity.accumulatePipelineLayout",bindGroupLayouts:[n.uniforms.bindGroupLayout,a]});this.radiosityPipeline=e.createComputePipeline({label:"Radiosity.radiosityPipeline",layout:i,compute:{module:r,entryPoint:"radiosity",constants:{PhotonsPerWorkgroup:this.kPhotonsPerWorkgroup,PhotonEnergy:this.kPhotonEnergy}}}),this.accumulationToLightmapPipeline=e.createComputePipeline({label:"Radiosity.accumulationToLightmapPipeline",layout:i,compute:{module:r,entryPoint:"accumulation_to_lightmap",constants:{AccumulationToLightmapWorkgroupSizeX:this.kAccumulationToLightmapWorkgroupSizeX,AccumulationToLightmapWorkgroupSizeY:this.kAccumulationToLightmapWorkgroupSizeY}}})}}v.sourceInfo={name:"src/sample/cornell/radiosity.ts".substring(19),contents:"import Common from './common';\nimport radiosityWGSL from './radiosity.wgsl';\nimport Scene from './scene';\n\n/**\n * Radiosity computes lightmaps, calculated by software raytracing of light in\n * the scene.\n */\nexport default class Radiosity {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n // The output lightmap format and dimensions\n static readonly lightmapFormat = 'rgba16float';\n static readonly lightmapWidth = 256;\n static readonly lightmapHeight = 256;\n\n // The output lightmap.\n readonly lightmap: GPUTexture;\n\n // Number of photons emitted per workgroup.\n // This is equal to the 
workgroup size (one photon per invocation)\n private readonly kPhotonsPerWorkgroup = 256;\n // Number of radiosity workgroups dispatched per frame.\n private readonly kWorkgroupsPerFrame = 1024;\n private readonly kPhotonsPerFrame =\n this.kPhotonsPerWorkgroup * this.kWorkgroupsPerFrame;\n // Maximum value that can be added to the 'accumulation' buffer, per photon,\n // across all texels.\n private readonly kPhotonEnergy = 100000;\n // The total number of lightmap texels for all quads.\n private readonly kTotalLightmapTexels;\n\n private readonly kAccumulationToLightmapWorkgroupSizeX = 16;\n private readonly kAccumulationToLightmapWorkgroupSizeY = 16;\n\n private readonly device: GPUDevice;\n private readonly common: Common;\n private readonly scene: Scene;\n private readonly radiosityPipeline: GPUComputePipeline;\n private readonly accumulationToLightmapPipeline: GPUComputePipeline;\n private readonly bindGroup: GPUBindGroup;\n private readonly accumulationBuffer: GPUBuffer;\n private readonly uniformBuffer: GPUBuffer;\n\n // The 'accumulation' buffer average value\n private accumulationMean = 0;\n\n // The maximum value of 'accumulationAverage' before all values in\n // 'accumulation' are reduced to avoid integer overflows.\n private readonly kAccumulationMeanMax = 0x10000000;\n\n constructor(device: GPUDevice, common: Common, scene: Scene) {\n this.device = device;\n this.common = common;\n this.scene = scene;\n this.lightmap = device.createTexture({\n label: 'Radiosity.lightmap',\n size: {\n width: Radiosity.lightmapWidth,\n height: Radiosity.lightmapHeight,\n depthOrArrayLayers: scene.quads.length,\n },\n format: Radiosity.lightmapFormat,\n usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,\n });\n this.accumulationBuffer = device.createBuffer({\n label: 'Radiosity.accumulationBuffer',\n size:\n Radiosity.lightmapWidth *\n Radiosity.lightmapHeight *\n scene.quads.length *\n 16,\n usage: GPUBufferUsage.STORAGE,\n });\n this.kTotalLightmapTexels =\n Radiosity.lightmapWidth * Radiosity.lightmapHeight * scene.quads.length;\n this.uniformBuffer = device.createBuffer({\n label: 'Radiosity.uniformBuffer',\n size: 8 * 4, // 8 x f32\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Radiosity.bindGroupLayout',\n entries: [\n {\n // accumulation buffer\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'storage' },\n },\n {\n // lightmap\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: Radiosity.lightmapFormat,\n viewDimension: '2d-array',\n },\n },\n {\n // radiosity_uniforms\n binding: 2,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'uniform' },\n },\n ],\n });\n this.bindGroup = device.createBindGroup({\n label: 'Radiosity.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // accumulation buffer\n binding: 0,\n resource: {\n buffer: this.accumulationBuffer,\n size: this.accumulationBuffer.size,\n },\n },\n {\n // lightmap\n binding: 1,\n resource: this.lightmap.createView(),\n },\n {\n // radiosity_uniforms\n binding: 2,\n resource: {\n buffer: this.uniformBuffer,\n size: this.uniformBuffer.size,\n },\n },\n ],\n });\n\n const mod = device.createShaderModule({\n code: radiosityWGSL + common.wgsl,\n });\n const pipelineLayout = device.createPipelineLayout({\n label: 'Radiosity.accumulatePipelineLayout',\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n });\n\n 
this.radiosityPipeline = device.createComputePipeline({\n label: 'Radiosity.radiosityPipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'radiosity',\n constants: {\n PhotonsPerWorkgroup: this.kPhotonsPerWorkgroup,\n PhotonEnergy: this.kPhotonEnergy,\n },\n },\n });\n\n this.accumulationToLightmapPipeline = device.createComputePipeline({\n label: 'Radiosity.accumulationToLightmapPipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'accumulation_to_lightmap',\n constants: {\n AccumulationToLightmapWorkgroupSizeX:\n this.kAccumulationToLightmapWorkgroupSizeX,\n AccumulationToLightmapWorkgroupSizeY:\n this.kAccumulationToLightmapWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n // Calculate the new mean value for the accumulation buffer\n this.accumulationMean +=\n (this.kPhotonsPerFrame * this.kPhotonEnergy) / this.kTotalLightmapTexels;\n\n // Calculate the 'accumulation' -> 'lightmap' scale factor from 'accumulationMean'\n const accumulationToLightmapScale = 1 / this.accumulationMean;\n // If 'accumulationMean' is greater than 'kAccumulationMeanMax', then reduce\n // the 'accumulation' buffer values to prevent u32 overflow.\n const accumulationBufferScale =\n this.accumulationMean > 2 * this.kAccumulationMeanMax ? 0.5 : 1;\n this.accumulationMean *= accumulationBufferScale;\n\n // Update the radiosity uniform buffer data.\n const uniformDataF32 = new Float32Array(this.uniformBuffer.size / 4);\n uniformDataF32[0] = accumulationToLightmapScale;\n uniformDataF32[1] = accumulationBufferScale;\n uniformDataF32[2] = this.scene.lightWidth;\n uniformDataF32[3] = this.scene.lightHeight;\n uniformDataF32[4] = this.scene.lightCenter[0];\n uniformDataF32[5] = this.scene.lightCenter[1];\n uniformDataF32[6] = this.scene.lightCenter[2];\n this.device.queue.writeBuffer(\n this.uniformBuffer,\n 0,\n uniformDataF32.buffer,\n uniformDataF32.byteOffset,\n uniformDataF32.byteLength\n );\n\n // Dispatch the radiosity workgroups\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.setPipeline(this.radiosityPipeline);\n passEncoder.dispatchWorkgroups(this.kWorkgroupsPerFrame);\n\n // Then copy the 'accumulation' data to 'lightmap'\n passEncoder.setPipeline(this.accumulationToLightmapPipeline);\n passEncoder.dispatchWorkgroups(\n Math.ceil(\n Radiosity.lightmapWidth / this.kAccumulationToLightmapWorkgroupSizeX\n ),\n Math.ceil(\n Radiosity.lightmapHeight / this.kAccumulationToLightmapWorkgroupSizeY\n ),\n this.lightmap.depthOrArrayLayers\n );\n passEncoder.end();\n }\n}\n"},v.lightmapFormat="rgba16float",v.lightmapWidth=256,v.lightmapHeight=256;class b{run(e){let n=e.beginRenderPass(this.renderPassDescriptor);n.setPipeline(this.pipeline),n.setVertexBuffer(0,this.scene.vertices),n.setIndexBuffer(this.scene.indices,"uint16"),n.setBindGroup(0,this.common.uniforms.bindGroup),n.setBindGroup(1,this.bindGroup),n.drawIndexed(this.scene.indexCount),n.end()}constructor(e,n,t,a,r){this.common=n,this.scene=t;let 
i=e.createTexture({label:"RasterizerRenderer.depthTexture",size:[r.width,r.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT});this.renderPassDescriptor={label:"RasterizerRenderer.renderPassDescriptor",colorAttachments:[{view:r.createView(),clearValue:[.1,.2,.3,1],loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:i.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}};let o=e.createBindGroupLayout({label:"RasterizerRenderer.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,texture:{viewDimension:"2d-array"}},{binding:1,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,sampler:{}}]});this.bindGroup=e.createBindGroup({label:"RasterizerRenderer.bindGroup",layout:o,entries:[{binding:0,resource:a.lightmap.createView()},{binding:1,resource:e.createSampler({addressModeU:"clamp-to-edge",addressModeV:"clamp-to-edge",magFilter:"linear",minFilter:"linear"})}]});let u=e.createShaderModule({label:"RasterizerRenderer.module",code:s+n.wgsl});this.pipeline=e.createRenderPipeline({label:"RasterizerRenderer.pipeline",layout:e.createPipelineLayout({bindGroupLayouts:[n.uniforms.bindGroupLayout,o]}),vertex:{module:u,entryPoint:"vs_main",buffers:t.vertexBufferLayout},fragment:{module:u,entryPoint:"fs_main",targets:[{format:r.format}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}})}}b.sourceInfo={name:"src/sample/cornell/rasterizer.ts".substring(19),contents:"import rasterizerWGSL from './rasterizer.wgsl';\n\nimport Common from './common';\nimport Radiosity from './radiosity';\nimport Scene from './scene';\n\n/**\n * Rasterizer renders the scene using a regular raserization graphics pipeline.\n */\nexport default class Rasterizer {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly common: Common;\n private readonly scene: Scene;\n private readonly renderPassDescriptor: GPURenderPassDescriptor;\n private readonly pipeline: GPURenderPipeline;\n private readonly bindGroup: GPUBindGroup;\n\n constructor(\n device: GPUDevice,\n common: Common,\n scene: Scene,\n radiosity: Radiosity,\n framebuffer: GPUTexture\n ) {\n this.common = common;\n this.scene = scene;\n\n const depthTexture = device.createTexture({\n label: 'RasterizerRenderer.depthTexture',\n size: [framebuffer.width, framebuffer.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n this.renderPassDescriptor = {\n label: 'RasterizerRenderer.renderPassDescriptor',\n colorAttachments: [\n {\n view: framebuffer.createView(),\n clearValue: [0.1, 0.2, 0.3, 1],\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'RasterizerRenderer.bindGroupLayout',\n entries: [\n {\n // lightmap\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n texture: { viewDimension: '2d-array' },\n },\n {\n // sampler\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n sampler: {},\n },\n ],\n });\n\n this.bindGroup = device.createBindGroup({\n label: 'RasterizerRenderer.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // lightmap\n binding: 0,\n resource: radiosity.lightmap.createView(),\n },\n {\n // sampler\n binding: 1,\n 
resource: device.createSampler({\n addressModeU: 'clamp-to-edge',\n addressModeV: 'clamp-to-edge',\n magFilter: 'linear',\n minFilter: 'linear',\n }),\n },\n ],\n });\n\n const mod = device.createShaderModule({\n label: 'RasterizerRenderer.module',\n code: rasterizerWGSL + common.wgsl,\n });\n\n this.pipeline = device.createRenderPipeline({\n label: 'RasterizerRenderer.pipeline',\n layout: device.createPipelineLayout({\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n }),\n vertex: {\n module: mod,\n entryPoint: 'vs_main',\n buffers: scene.vertexBufferLayout,\n },\n fragment: {\n module: mod,\n entryPoint: 'fs_main',\n targets: [{ format: framebuffer.format }],\n },\n primitive: {\n topology: 'triangle-list',\n cullMode: 'back',\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginRenderPass(\n this.renderPassDescriptor\n );\n passEncoder.setPipeline(this.pipeline);\n passEncoder.setVertexBuffer(0, this.scene.vertices);\n passEncoder.setIndexBuffer(this.scene.indices, 'uint16');\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.drawIndexed(this.scene.indexCount);\n passEncoder.end();\n }\n}\n"};class y{run(e){let n=e.beginComputePass();n.setBindGroup(0,this.bindGroup),n.setPipeline(this.pipeline),n.dispatchWorkgroups(Math.ceil(this.width/this.kWorkgroupSizeX),Math.ceil(this.height/this.kWorkgroupSizeY)),n.end()}constructor(e,n,t,a){this.kWorkgroupSizeX=16,this.kWorkgroupSizeY=16,this.width=t.width,this.height=t.height;let r=e.createBindGroupLayout({label:"Tonemapper.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.COMPUTE,texture:{viewDimension:"2d"}},{binding:1,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:a.format,viewDimension:"2d"}}]});this.bindGroup=e.createBindGroup({label:"Tonemapper.bindGroup",layout:r,entries:[{binding:0,resource:t.createView()},{binding:1,resource:a.createView()}]});let i=e.createShaderModule({code:c.replace("{OUTPUT_FORMAT}",a.format)+n.wgsl}),o=e.createPipelineLayout({label:"Tonemap.pipelineLayout",bindGroupLayouts:[r]});this.pipeline=e.createComputePipeline({label:"Tonemap.pipeline",layout:o,compute:{module:i,entryPoint:"main",constants:{WorkgroupSizeX:this.kWorkgroupSizeX,WorkgroupSizeY:this.kWorkgroupSizeY}}})}}y.sourceInfo={name:"src/sample/cornell/tonemapper.ts".substring(19),contents:"import Common from './common';\nimport tonemapperWGSL from './tonemapper.wgsl';\n\n/**\n * Tonemapper implements a tonemapper to convert a linear-light framebuffer to\n * a gamma-correct, tonemapped framebuffer used for presentation.\n */\nexport default class Tonemapper {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly bindGroup: GPUBindGroup;\n private readonly pipeline: GPUComputePipeline;\n private readonly width: number;\n private readonly height: number;\n private readonly kWorkgroupSizeX = 16;\n private readonly kWorkgroupSizeY = 16;\n\n constructor(\n device: GPUDevice,\n common: Common,\n input: GPUTexture,\n output: GPUTexture\n ) {\n this.width = input.width;\n this.height = input.height;\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Tonemapper.bindGroupLayout',\n entries: [\n {\n // input\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n texture: {\n viewDimension: '2d',\n 
},\n },\n {\n // output\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: output.format,\n viewDimension: '2d',\n },\n },\n ],\n });\n this.bindGroup = device.createBindGroup({\n label: 'Tonemapper.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // input\n binding: 0,\n resource: input.createView(),\n },\n {\n // output\n binding: 1,\n resource: output.createView(),\n },\n ],\n });\n\n const mod = device.createShaderModule({\n code:\n tonemapperWGSL.replace('{OUTPUT_FORMAT}', output.format) + common.wgsl,\n });\n const pipelineLayout = device.createPipelineLayout({\n label: 'Tonemap.pipelineLayout',\n bindGroupLayouts: [bindGroupLayout],\n });\n\n this.pipeline = device.createComputePipeline({\n label: 'Tonemap.pipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'main',\n constants: {\n WorkgroupSizeX: this.kWorkgroupSizeX,\n WorkgroupSizeY: this.kWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setBindGroup(0, this.bindGroup);\n passEncoder.setPipeline(this.pipeline);\n passEncoder.dispatchWorkgroups(\n Math.ceil(this.width / this.kWorkgroupSizeX),\n Math.ceil(this.height / this.kWorkgroupSizeY)\n );\n passEncoder.end();\n }\n}\n"};class x{run(e){let n=e.beginComputePass();n.setPipeline(this.pipeline),n.setBindGroup(0,this.common.uniforms.bindGroup),n.setBindGroup(1,this.bindGroup),n.dispatchWorkgroups(Math.ceil(this.framebuffer.width/this.kWorkgroupSizeX),Math.ceil(this.framebuffer.height/this.kWorkgroupSizeY)),n.end()}constructor(e,n,t,a){this.kWorkgroupSizeX=16,this.kWorkgroupSizeY=16,this.common=n,this.framebuffer=a;let r=e.createBindGroupLayout({label:"Raytracer.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,texture:{viewDimension:"2d-array"}},{binding:1,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,sampler:{}},{binding:2,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:a.format,viewDimension:"2d"}}]});this.bindGroup=e.createBindGroup({label:"rendererBindGroup",layout:r,entries:[{binding:0,resource:t.lightmap.createView()},{binding:1,resource:e.createSampler({addressModeU:"clamp-to-edge",addressModeV:"clamp-to-edge",addressModeW:"clamp-to-edge",magFilter:"linear",minFilter:"linear"})},{binding:2,resource:a.createView()}]}),this.pipeline=e.createComputePipeline({label:"raytracerPipeline",layout:e.createPipelineLayout({bindGroupLayouts:[n.uniforms.bindGroupLayout,r]}),compute:{module:e.createShaderModule({code:u+n.wgsl}),entryPoint:"main",constants:{WorkgroupSizeX:this.kWorkgroupSizeX,WorkgroupSizeY:this.kWorkgroupSizeY}}})}}x.sourceInfo={name:"src/sample/cornell/raytracer.ts".substring(19),contents:"import raytracerWGSL from './raytracer.wgsl';\n\nimport Common from './common';\nimport Radiosity from './radiosity';\n\n/**\n * Raytracer renders the scene using a software ray-tracing compute pipeline.\n */\nexport default class Raytracer {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly common: Common;\n private readonly framebuffer: GPUTexture;\n private readonly pipeline: GPUComputePipeline;\n private readonly bindGroup: GPUBindGroup;\n\n private readonly kWorkgroupSizeX = 16;\n private readonly kWorkgroupSizeY = 16;\n\n constructor(\n device: GPUDevice,\n common: Common,\n radiosity: Radiosity,\n framebuffer: GPUTexture\n ) {\n 
this.common = common;\n this.framebuffer = framebuffer;\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Raytracer.bindGroupLayout',\n entries: [\n {\n // lightmap\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n texture: { viewDimension: '2d-array' },\n },\n {\n // sampler\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n sampler: {},\n },\n {\n // framebuffer\n binding: 2,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: framebuffer.format,\n viewDimension: '2d',\n },\n },\n ],\n });\n\n this.bindGroup = device.createBindGroup({\n label: 'rendererBindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: radiosity.lightmap.createView(),\n },\n {\n binding: 1,\n resource: device.createSampler({\n addressModeU: 'clamp-to-edge',\n addressModeV: 'clamp-to-edge',\n addressModeW: 'clamp-to-edge',\n magFilter: 'linear',\n minFilter: 'linear',\n }),\n },\n {\n binding: 2,\n resource: framebuffer.createView(),\n },\n ],\n });\n\n this.pipeline = device.createComputePipeline({\n label: 'raytracerPipeline',\n layout: device.createPipelineLayout({\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n }),\n compute: {\n module: device.createShaderModule({\n code: raytracerWGSL + common.wgsl,\n }),\n entryPoint: 'main',\n constants: {\n WorkgroupSizeX: this.kWorkgroupSizeX,\n WorkgroupSizeY: this.kWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(this.pipeline);\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.dispatchWorkgroups(\n Math.ceil(this.framebuffer.width / this.kWorkgroupSizeX),\n Math.ceil(this.framebuffer.height / this.kWorkgroupSizeY)\n );\n passEncoder.end();\n }\n}\n"};var P="src/sample/cornell/main.ts";let _=async e=>{let{canvas:n,pageState:t,gui:a}=e,r=navigator.gpu.getPreferredCanvasFormat(),i="bgra8unorm"===r?["bgra8unorm-storage"]:[],o=await navigator.gpu.requestAdapter();for(let s of i)if(!o.features.has(s))throw Error("sample requires ".concat(s,", but is not supported by the adapter"));let u=await o.requestDevice({requiredFeatures:i});if(!t.active)return;let c={renderer:"rasterizer",rotateCamera:!0};a.add(c,"renderer",["rasterizer","raytracer"]),a.add(c,"rotateCamera",!0);let l=window.devicePixelRatio;n.width=n.clientWidth*l,n.height=n.clientHeight*l;let d=n.getContext("webgpu");d.configure({device:u,format:r,usage:GPUTextureUsage.RENDER_ATTACHMENT|GPUTextureUsage.STORAGE_BINDING,alphaMode:"premultiplied"});let m=u.createTexture({label:"framebuffer",size:[n.width,n.height],format:"rgba16float",usage:GPUTextureUsage.RENDER_ATTACHMENT|GPUTextureUsage.STORAGE_BINDING|GPUTextureUsage.TEXTURE_BINDING}),f=new h(u),p=new g(u,f.quadBuffer),P=new v(u,p,f),_=new b(u,p,f,P,m),G=new x(u,p,P,m);requestAnimationFrame(function e(){if(!t.active)return;let a=d.getCurrentTexture(),r=u.createCommandEncoder();switch(p.update({rotateCamera:c.rotateCamera,aspect:n.width/n.height}),P.run(r),c.renderer){case"rasterizer":_.run(r);break;case"raytracer":G.run(r)}let i=new y(u,p,m,a);i.run(r),u.queue.submit([r.finish()]),requestAnimationFrame(e)})},G=()=>(0,i.Tl)({name:"Cornell box",description:"A classic Cornell box, using a lightmap generated using software ray-tracing.",gui:!0,init:_,sources:[{name:P.substring(19),contents:"import { makeSample, SampleInit 
} from '../../components/SampleLayout';\n\nimport radiosityWGSL from './radiosity.wgsl';\nimport rasterizerWGSL from './rasterizer.wgsl';\nimport raytracerWGSL from './raytracer.wgsl';\nimport tonemapperWGSL from './tonemapper.wgsl';\nimport commonWGSL from './common.wgsl';\nimport Scene from './scene';\nimport Common from './common';\nimport Radiosity from './radiosity';\nimport Rasterizer from './rasterizer';\nimport Tonemapper from './tonemapper';\nimport Raytracer from './raytracer';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n const requiredFeatures: GPUFeatureName[] =\n presentationFormat === 'bgra8unorm' ? ['bgra8unorm-storage'] : [];\n const adapter = await navigator.gpu.requestAdapter();\n for (const feature of requiredFeatures) {\n if (!adapter.features.has(feature)) {\n throw new Error(\n `sample requires ${feature}, but is not supported by the adapter`\n );\n }\n }\n const device = await adapter.requestDevice({ requiredFeatures });\n\n if (!pageState.active) return;\n\n const params: {\n renderer: 'rasterizer' | 'raytracer';\n rotateCamera: boolean;\n } = {\n renderer: 'rasterizer',\n rotateCamera: true,\n };\n\n gui.add(params, 'renderer', ['rasterizer', 'raytracer']);\n gui.add(params, 'rotateCamera', true);\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n context.configure({\n device,\n format: presentationFormat,\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.STORAGE_BINDING,\n alphaMode: 'premultiplied',\n });\n\n const framebuffer = device.createTexture({\n label: 'framebuffer',\n size: [canvas.width, canvas.height],\n format: 'rgba16float',\n usage:\n GPUTextureUsage.RENDER_ATTACHMENT |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const scene = new Scene(device);\n const common = new Common(device, scene.quadBuffer);\n const radiosity = new Radiosity(device, common, scene);\n const rasterizer = new Rasterizer(\n device,\n common,\n scene,\n radiosity,\n framebuffer\n );\n const raytracer = new Raytracer(device, common, radiosity, framebuffer);\n\n function frame() {\n if (!pageState.active) {\n // Sample is no longer the active page.\n return;\n }\n\n const canvasTexture = context.getCurrentTexture();\n const commandEncoder = device.createCommandEncoder();\n\n common.update({\n rotateCamera: params.rotateCamera,\n aspect: canvas.width / canvas.height,\n });\n radiosity.run(commandEncoder);\n\n switch (params.renderer) {\n case 'rasterizer': {\n rasterizer.run(commandEncoder);\n break;\n }\n case 'raytracer': {\n raytracer.run(commandEncoder);\n break;\n }\n }\n\n const tonemapper = new Tonemapper(\n device,\n common,\n framebuffer,\n canvasTexture\n );\n tonemapper.run(commandEncoder);\n\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n\n requestAnimationFrame(frame);\n};\n\nconst CornellBox: () => JSX.Element = () =>\n makeSample({\n name: 'Cornell box',\n description:\n 'A classic Cornell box, using a lightmap generated using software ray-tracing.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n Common.sourceInfo,\n Scene.sourceInfo,\n Radiosity.sourceInfo,\n Rasterizer.sourceInfo,\n Raytracer.sourceInfo,\n 
Tonemapper.sourceInfo,\n {\n name: './radiosity.wgsl',\n contents: radiosityWGSL,\n editable: true,\n },\n {\n name: './rasterizer.wgsl',\n contents: rasterizerWGSL,\n editable: true,\n },\n {\n name: './raytracer.wgsl',\n contents: raytracerWGSL,\n editable: true,\n },\n {\n name: './tonemapper.wgsl',\n contents: tonemapperWGSL,\n editable: true,\n },\n {\n name: './common.wgsl',\n contents: commonWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default CornellBox;\n"},g.sourceInfo,h.sourceInfo,v.sourceInfo,b.sourceInfo,x.sourceInfo,y.sourceInfo,{name:"./radiosity.wgsl",contents:o,editable:!0},{name:"./rasterizer.wgsl",contents:s,editable:!0},{name:"./raytracer.wgsl",contents:u,editable:!0},{name:"./tonemapper.wgsl",contents:c,editable:!0},{name:"./common.wgsl",contents:l,editable:!0}],filename:P});var S=G},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileScrollContainer:"SampleLayout_sourceFileScrollContainer__LsNEm",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/874.eec0721da88d3bc7.js b/_next/static/chunks/874.eec0721da88d3bc7.js deleted file mode 100644 index d33a176b..00000000 --- a/_next/static/chunks/874.eec0721da88d3bc7.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[874],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var a=t(5893),r=t(9008),i=t.n(r),o=t(1163),s=t(7294),u=t(9147),c=t.n(u);t(7319);let l=e=>{let n=(0,s.useRef)(null),r=(0,s.useRef)(null),u=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:r}=e;return{name:n,...function(e){let n;let r=null;{r=document.createElement("div");let i=t(4631);n=i(r,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,a.jsx)("div",{...t,children:(0,a.jsx)("div",{ref(t){r&&t&&(t.appendChild(r),n.setOption("value",e))}})})}}}(r)}}),e.sources),l=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),a=new n.GUI({autoPlace:!1});return a.domElement.style.position="relative",a.domElement.style.zIndex="1000",a}},[]),m=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),h=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,v]=(0,s.useState)(null),[b,y]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?y(h[1]):y(u[0].name),d&&l.current)for(l.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();f&&m.current&&(f.dom.style.position="absolute",f.showPanel(1),m.current.appendChild(f.dom));let t={active:!0},a=()=>{t.active=!1};try{let r=n.current;if(!r)throw Error("The canvas is not available");let i=e.init({canvas:r,pageState:t,gui:d,stats:f});i instanceof Promise&&i.catch(e=>{console.error(e),v(e)})}catch(o){console.error(o),v(o)}return a},[]),(0,a.jsxs)("main",{children:[(0,a.jsxs)(i(),{children:[(0,a.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,a.jsx)("title",{children:"".concat(e.name," - WebGPU 
Samples")}),(0,a.jsx)("meta",{name:"description",content:e.description}),(0,a.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("h1",{children:e.name}),(0,a.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,a.jsx)("p",{children:e.description}),g?(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,a.jsx)("p",{children:"".concat(g)})]}):null]}),(0,a.jsxs)("div",{className:c().canvasContainer,children:[(0,a.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,a.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,a.jsx)("canvas",{ref:n})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("nav",{className:c().sourceFileNav,ref:r,children:(0,a.jsx)("div",{className:c().sourceFileScrollContainer,onScroll(e){let n=e.currentTarget,t=n.scrollWidth-n.clientWidth-n.scrollLeft;n.scrollLeft>25?r.current.setAttribute("data-left","true"):r.current.setAttribute("data-left","false"),t>25?r.current.setAttribute("data-right","true"):r.current.setAttribute("data-right","false")},children:(0,a.jsx)("ul",{children:u.map((e,n)=>(0,a.jsx)("li",{children:(0,a.jsx)("a",{href:"#".concat(e.name),"data-active":b==e.name,onClick(){y(e.name)},children:e.name})},n))})})}),u.map((e,n)=>(0,a.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":b==e.name},n))]})]})},d=e=>(0,a.jsx)(l,{...e});function m(e,n){if(!e)throw Error(n)}},6874:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return S}});var a,r,i=t(5671),o="// A storage buffer holding an array of atomic.\n// The array elements are a sequence of red, green, blue components, for each\n// lightmap texel, for each quad surface.\n@group(1) @binding(0)\nvar accumulation : array>;\n\n// The output lightmap texture.\n@group(1) @binding(1)\nvar lightmap : texture_storage_2d_array;\n\n// Uniform data used by the accumulation_to_lightmap entry point\nstruct Uniforms {\n // Scalar for converting accumulation values to output lightmap values\n accumulation_to_lightmap_scale : f32,\n // Accumulation buffer rescaling value\n accumulation_buffer_scale : f32,\n // The width of the light\n light_width : f32,\n // The height of the light\n light_height : f32,\n // The center of the light\n light_center : vec3f,\n}\n\n// accumulation_to_lightmap uniforms binding point\n@group(1) @binding(2) var uniforms : Uniforms;\n\n// Number of photons emitted per workgroup\noverride PhotonsPerWorkgroup : u32;\n\n// Maximum value that can be added to the accumulation buffer from a single photon\noverride PhotonEnergy : f32;\n\n// Number of bounces of each photon\nconst PhotonBounces = 4;\n\n// Amount of light absorbed with each photon bounce (0: 0%, 1: 100%)\nconst LightAbsorbtion = 0.5;\n\n// Radiosity compute shader.\n// Each invocation creates a photon from the light source, and accumulates\n// bounce lighting into the 'accumulation' buffer.\n@compute @workgroup_size(PhotonsPerWorkgroup)\nfn radiosity(@builtin(global_invocation_id) invocation_id : vec3u) {\n init_rand(invocation_id);\n photon();\n}\n\n// Spawns a photon at the light source, performs ray tracing in the scene,\n// accumulating light values into 'accumulation' for each quad surface hit.\nfn photon() {\n // Create a random ray from the light.\n var ray = new_light_ray();\n // Give the photon an initial energy value.\n var color = PhotonEnergy * vec3f(1, 0.8, 
0.6);\n\n // Start bouncing.\n for (var i = 0; i < (PhotonBounces+1); i++) {\n // Find the closest hit of the ray with the scene's quads.\n let hit = raytrace(ray);\n let quad = quads[hit.quad];\n\n // Bounce the ray.\n ray.start = hit.pos + quad.plane.xyz * 1e-5;\n ray.dir = normalize(reflect(ray.dir, quad.plane.xyz) + rand_unit_sphere() * 0.75);\n\n // Photon color is multiplied by the quad's color.\n color *= quad.color;\n\n // Accumulate the aborbed light into the 'accumulation' buffer.\n accumulate(hit.uv, hit.quad, color * LightAbsorbtion);\n\n // What wasn't absorbed is reflected.\n color *= 1 - LightAbsorbtion;\n }\n}\n\n// Performs an atomicAdd() with 'color' into the 'accumulation' buffer at 'uv'\n// and 'quad'.\nfn accumulate(uv : vec2f, quad : u32, color : vec3f) {\n let dims = textureDimensions(lightmap);\n let base_idx = accumulation_base_index(vec2u(uv * vec2f(dims)), quad);\n atomicAdd(&accumulation[base_idx + 0], u32(color.r + 0.5));\n atomicAdd(&accumulation[base_idx + 1], u32(color.g + 0.5));\n atomicAdd(&accumulation[base_idx + 2], u32(color.b + 0.5));\n}\n\n// Returns the base element index for the texel at 'coord' for 'quad'\nfn accumulation_base_index(coord : vec2u, quad : u32) -> u32 {\n let dims = textureDimensions(lightmap);\n let c = min(vec2u(dims) - 1, coord);\n return 3 * (c.x + dims.x * c.y + dims.x * dims.y * quad);\n}\n\n// Returns a new Ray at a random point on the light, in a random downwards\n// direction.\nfn new_light_ray() -> Ray {\n let center = uniforms.light_center;\n let pos = center + vec3f(uniforms.light_width * (rand() - 0.5),\n 0,\n uniforms.light_height * (rand() - 0.5));\n var dir = rand_cosine_weighted_hemisphere().xzy;\n dir.y = -dir.y;\n return Ray(pos, dir);\n}\n\noverride AccumulationToLightmapWorkgroupSizeX : u32;\noverride AccumulationToLightmapWorkgroupSizeY : u32;\n\n// Compute shader used to copy the atomic data in 'accumulation' to\n// 'lightmap'. 
'accumulation' might also be scaled to reduce integer overflow.\n@compute @workgroup_size(AccumulationToLightmapWorkgroupSizeX, AccumulationToLightmapWorkgroupSizeY)\nfn accumulation_to_lightmap(@builtin(global_invocation_id) invocation_id : vec3u,\n @builtin(workgroup_id) workgroup_id : vec3u) {\n let dims = textureDimensions(lightmap);\n let quad = workgroup_id.z; // The workgroup 'z' value holds the quad index.\n let coord = invocation_id.xy;\n if (all(coord < dims)) {\n // Load the color value out of 'accumulation'\n let base_idx = accumulation_base_index(coord, quad);\n let color = vec3(f32(atomicLoad(&accumulation[base_idx + 0])),\n f32(atomicLoad(&accumulation[base_idx + 1])),\n f32(atomicLoad(&accumulation[base_idx + 2])));\n\n // Multiply the color by 'uniforms.accumulation_to_lightmap_scale' and write it to\n // the lightmap.\n textureStore(lightmap, coord, quad, vec4(color * uniforms.accumulation_to_lightmap_scale, 1));\n\n // If the 'accumulation' buffer is nearing saturation, then\n // 'uniforms.accumulation_buffer_scale' will be less than 1, scaling the values\n // to something less likely to overflow the u32.\n if (uniforms.accumulation_buffer_scale != 1.0) {\n let scaled = color * uniforms.accumulation_buffer_scale + 0.5;\n atomicStore(&accumulation[base_idx + 0], u32(scaled.r));\n atomicStore(&accumulation[base_idx + 1], u32(scaled.g));\n atomicStore(&accumulation[base_idx + 2], u32(scaled.b));\n }\n }\n}\n",s="// The lightmap data\n@group(1) @binding(0) var lightmap : texture_2d_array;\n\n// The sampler used to sample the lightmap\n@group(1) @binding(1) var smpl : sampler;\n\n// Vertex shader input data\nstruct VertexIn {\n @location(0) position : vec4f,\n @location(1) uv : vec3f,\n @location(2) emissive : vec3f,\n}\n\n// Vertex shader output data\nstruct VertexOut {\n @builtin(position) position : vec4f,\n @location(0) uv : vec2f,\n @location(1) emissive : vec3f,\n @interpolate(flat)\n @location(2) quad : u32,\n}\n\n@vertex\nfn vs_main(input : VertexIn) -> VertexOut {\n var output : VertexOut;\n output.position = common_uniforms.mvp * input.position;\n output.uv = input.uv.xy;\n output.quad = u32(input.uv.z + 0.5);\n output.emissive = input.emissive;\n return output;\n}\n\n@fragment\nfn fs_main(vertex_out : VertexOut) -> @location(0) vec4f {\n return textureSample(lightmap, smpl, vertex_out.uv, vertex_out.quad) + vec4f(vertex_out.emissive, 1);\n}\n",u="// The lightmap data\n@group(1) @binding(0) var lightmap : texture_2d_array;\n\n// The sampler used to sample the lightmap\n@group(1) @binding(1) var smpl : sampler;\n\n// The output framebuffer\n@group(1) @binding(2) var framebuffer : texture_storage_2d;\n\noverride WorkgroupSizeX : u32;\noverride WorkgroupSizeY : u32;\n\nconst NumReflectionRays = 5;\n\n@compute @workgroup_size(WorkgroupSizeX, WorkgroupSizeY)\nfn main(@builtin(global_invocation_id) invocation_id : vec3u) {\n if (all(invocation_id.xy < textureDimensions(framebuffer))) {\n init_rand(invocation_id);\n\n // Calculate the fragment's NDC coordinates for the intersection of the near\n // clip plane and far clip plane\n let uv = vec2f(invocation_id.xy) / vec2f(textureDimensions(framebuffer).xy);\n let ndcXY = (uv - 0.5) * vec2(2, -2);\n\n // Transform the coordinates back into world space\n var near = common_uniforms.inv_mvp * vec4f(ndcXY, 0.0, 1);\n var far = common_uniforms.inv_mvp * vec4f(ndcXY, 1, 1);\n near /= near.w;\n far /= far.w;\n\n // Create a ray that starts at the near clip plane, heading in the fragment's\n // z-direction, and raytrace to find the 
nearest quad that the ray intersects.\n let ray = Ray(near.xyz, normalize(far.xyz - near.xyz));\n let hit = raytrace(ray);\n\n let hit_color = sample_hit(hit);\n var normal = quads[hit.quad].plane.xyz;\n\n // Fire a few rays off the surface to collect some reflections\n let bounce = reflect(ray.dir, normal);\n var reflection : vec3f;\n for (var i = 0; i < NumReflectionRays; i++) {\n let reflection_dir = normalize(bounce + rand_unit_sphere()*0.1);\n let reflection_ray = Ray(hit.pos + bounce * 1e-5, reflection_dir);\n let reflection_hit = raytrace(reflection_ray);\n reflection += sample_hit(reflection_hit);\n }\n let color = mix(reflection / NumReflectionRays, hit_color, 0.95);\n\n textureStore(framebuffer, invocation_id.xy, vec4(color, 1));\n }\n}\n\n\n// Returns the sampled hit quad's lightmap at 'hit.uv', and adds the quad's\n// emissive value.\nfn sample_hit(hit : HitInfo) -> vec3f {\n let quad = quads[hit.quad];\n // Sample the quad's lightmap, and add emissive.\n return textureSampleLevel(lightmap, smpl, hit.uv, hit.quad, 0).rgb +\n quad.emissive * quad.color;\n}\n",c="// The linear-light input framebuffer\n@group(0) @binding(0) var input : texture_2d;\n\n// The tonemapped, gamma-corrected output framebuffer\n@group(0) @binding(1) var output : texture_storage_2d<{OUTPUT_FORMAT}, write>;\n\nconst TonemapExposure = 0.5;\n\nconst Gamma = 2.2;\n\noverride WorkgroupSizeX : u32;\noverride WorkgroupSizeY : u32;\n\n@compute @workgroup_size(WorkgroupSizeX, WorkgroupSizeY)\nfn main(@builtin(global_invocation_id) invocation_id : vec3u) {\n let color = textureLoad(input, invocation_id.xy, 0).rgb;\n let tonemapped = reinhard_tonemap(color);\n textureStore(output, invocation_id.xy, vec4f(tonemapped, 1));\n}\n\nfn reinhard_tonemap(linearColor: vec3f) -> vec3f {\n let color = linearColor * TonemapExposure;\n let mapped = color / (1+color);\n return pow(mapped, vec3f(1 / Gamma));\n}\n",l="const pi = 3.14159265359;\n\n// Quad describes 2D rectangle on a plane\nstruct Quad {\n // The surface plane\n plane : vec4f,\n // A plane with a normal in the 'u' direction, intersecting the origin, at\n // right-angles to the surface plane.\n // The dot product of 'right' with a 'vec4(pos, 1)' will range between [-1..1]\n // if the projected point is within the quad.\n right : vec4f,\n // A plane with a normal in the 'v' direction, intersecting the origin, at\n // right-angles to the surface plane.\n // The dot product of 'up' with a 'vec4(pos, 1)' will range between [-1..1]\n // if the projected point is within the quad.\n up : vec4f,\n // The diffuse color of the quad\n color : vec3f,\n // Emissive value. 
0=no emissive, 1=full emissive.\n emissive : f32,\n};\n\n// Ray is a start point and direction.\nstruct Ray {\n start : vec3f,\n dir : vec3f,\n}\n\n// Value for HitInfo.quad if no intersection occured.\nconst kNoHit = 0xffffffff;\n\n// HitInfo describes the hit location of a ray-quad intersection\nstruct HitInfo {\n // Distance along the ray to the intersection\n dist : f32,\n // The quad index that was hit\n quad : u32,\n // The position of the intersection\n pos : vec3f,\n // The UVs of the quad at the point of intersection\n uv : vec2f,\n}\n\n// CommonUniforms uniform buffer data\nstruct CommonUniforms {\n // Model View Projection matrix\n mvp : mat4x4f,\n // Inverse of mvp\n inv_mvp : mat4x4f,\n // Random seed for the workgroup\n seed : vec3u,\n}\n\n// The common uniform buffer binding.\n@group(0) @binding(0) var common_uniforms : CommonUniforms;\n\n// The quad buffer binding.\n@group(0) @binding(1) var quads : array;\n\n// intersect_ray_quad will check to see if the ray 'r' intersects the quad 'q'.\n// If an intersection occurs, and the intersection is closer than 'closest' then\n// the intersection information is returned, otherwise 'closest' is returned.\nfn intersect_ray_quad(r : Ray, quad : u32, closest : HitInfo) -> HitInfo {\n let q = quads[quad];\n let plane_dist = dot(q.plane, vec4(r.start, 1));\n let ray_dist = plane_dist / -dot(q.plane.xyz, r.dir);\n let pos = r.start + r.dir * ray_dist;\n let uv = vec2(dot(vec4f(pos, 1), q.right),\n dot(vec4f(pos, 1), q.up)) * 0.5 + 0.5;\n let hit = plane_dist > 0 &&\n ray_dist > 0 &&\n ray_dist < closest.dist &&\n all((uv > vec2f()) & (uv < vec2f(1)));\n return HitInfo(\n select(closest.dist, ray_dist, hit),\n select(closest.quad, quad, hit),\n select(closest.pos, pos, hit),\n select(closest.uv, uv, hit),\n );\n}\n\n// raytrace finds the closest intersecting quad for the given ray\nfn raytrace(ray : Ray) -> HitInfo {\n var hit = HitInfo();\n hit.dist = 1e20;\n hit.quad = kNoHit;\n for (var quad = 0u; quad < arrayLength(&quads); quad++) {\n hit = intersect_ray_quad(ray, quad, hit);\n }\n return hit;\n}\n\n// A psuedo random number. Initialized with init_rand(), updated with rand().\nvar rnd : vec3u;\n\n// Initializes the random number generator.\nfn init_rand(invocation_id : vec3u) {\n const A = vec3(1741651 * 1009,\n 140893 * 1609 * 13,\n 6521 * 983 * 7 * 2);\n rnd = (invocation_id * A) ^ common_uniforms.seed;\n}\n\n// Returns a random number between 0 and 1.\nfn rand() -> f32 {\n const C = vec3(60493 * 9377,\n 11279 * 2539 * 23,\n 7919 * 631 * 5 * 3);\n\n rnd = (rnd * C) ^ (rnd.yzx >> vec3(4u));\n return f32(rnd.x ^ rnd.y) / 4294967295.0; // 4294967295.0 is f32(0xffffffff). 
See #337\n}\n\n// Returns a random point within a unit sphere centered at (0,0,0).\nfn rand_unit_sphere() -> vec3f {\n var u = rand();\n var v = rand();\n var theta = u * 2.0 * pi;\n var phi = acos(2.0 * v - 1.0);\n var r = pow(rand(), 1.0/3.0);\n var sin_theta = sin(theta);\n var cos_theta = cos(theta);\n var sin_phi = sin(phi);\n var cos_phi = cos(phi);\n var x = r * sin_phi * sin_theta;\n var y = r * sin_phi * cos_theta;\n var z = r * cos_phi;\n return vec3f(x, y, z);\n}\n\nfn rand_concentric_disk() -> vec2f {\n let u = vec2f(rand(), rand());\n let uOffset = 2.f * u - vec2f(1, 1);\n\n if (uOffset.x == 0 && uOffset.y == 0){\n return vec2f(0, 0);\n }\n\n var theta = 0.0;\n var r = 0.0;\n if (abs(uOffset.x) > abs(uOffset.y)) {\n r = uOffset.x;\n theta = (pi / 4) * (uOffset.y / uOffset.x);\n } else {\n r = uOffset.y;\n theta = (pi / 2) - (pi / 4) * (uOffset.x / uOffset.y);\n }\n return r * vec2f(cos(theta), sin(theta));\n}\n\nfn rand_cosine_weighted_hemisphere() -> vec3f {\n let d = rand_concentric_disk();\n let z = sqrt(max(0.0, 1.0 - d.x * d.x - d.y * d.y));\n return vec3f(d.x, d.y, z);\n}\n",d=t(6416);function m(e){let n=1/d.R3.lenSq(e);return d.R3.mul(d.R3.fromValues(n,n,n),e)}function f(e){let n=d.R3.fromValues(Math.cos(e.rotation)*(e.width/2),0,Math.sin(e.rotation)*(e.depth/2)),t=d.R3.fromValues(0,e.height/2,0),a=d.R3.fromValues(Math.sin(e.rotation)*(e.width/2),0,-Math.cos(e.rotation)*(e.depth/2)),i=e.color instanceof Array?e.color:Array(6).fill(e.color),o=n=>"concave"===e.type?n:d.R3.negate(n);return[{center:d.R3.add(e.center,n),right:o(d.R3.negate(a)),up:t,color:i[r.PositiveX]},{center:d.R3.add(e.center,t),right:o(n),up:d.R3.negate(a),color:i[r.PositiveY]},{center:d.R3.add(e.center,a),right:o(n),up:t,color:i[r.PositiveZ]},{center:d.R3.sub(e.center,n),right:o(a),up:t,color:i[r.NegativeX]},{center:d.R3.sub(e.center,t),right:o(n),up:a,color:i[r.NegativeY]},{center:d.R3.sub(e.center,a),right:o(d.R3.negate(n)),up:t,color:i[r.NegativeZ]}]}(a=r||(r={}))[a.PositiveX=0]="PositiveX",a[a.PositiveY=1]="PositiveY",a[a.PositiveZ=2]="PositiveZ",a[a.NegativeX=3]="NegativeX",a[a.NegativeY=4]="NegativeY",a[a.NegativeZ=5]="NegativeZ";let p={center:d.R3.fromValues(0,9.95,0),right:d.R3.fromValues(1,0,0),up:d.R3.fromValues(0,0,1),color:d.R3.fromValues(5,5,5),emissive:1};class h{constructor(e){this.quads=[...f({center:d.R3.fromValues(0,5,0),width:10,height:10,depth:10,rotation:0,color:[d.R3.fromValues(0,.5,0),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,0,0),d.R3.fromValues(.5,.5,.5),d.R3.fromValues(.5,.5,.5)],type:"concave"}),...f({center:d.R3.fromValues(1.5,1.5,1),width:3,height:3,depth:3,rotation:.3,color:d.R3.fromValues(.8,.8,.8),type:"convex"}),...f({center:d.R3.fromValues(-2,3,-2),width:3,height:6,depth:3,rotation:-.4,color:d.R3.fromValues(.8,.8,.8),type:"convex"}),p],this.lightCenter=p.center,this.lightWidth=2*d.R3.len(p.right),this.lightHeight=2*d.R3.len(p.up);let n=e.createBuffer({size:64*this.quads.length,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),t=new Float32Array(n.getMappedRange()),a=new Float32Array(40*this.quads.length),r=new Uint32Array(9*this.quads.length),i=0,o=0,s=0,u=0,c=0;for(let l=0;l x\n const x = vec3.fromValues(\n Math.cos(params.rotation) * (params.width / 2),\n 0,\n Math.sin(params.rotation) * (params.depth / 2)\n );\n const y = vec3.fromValues(0, params.height / 2, 0);\n const z = vec3.fromValues(\n Math.sin(params.rotation) * (params.width / 2),\n 0,\n -Math.cos(params.rotation) * (params.depth / 2)\n );\n const colors =\n params.color 
instanceof Array\n ? params.color\n : new Array(6).fill(params.color);\n const sign = (v: Vec3) => {\n return params.type === 'concave' ? v : vec3.negate(v);\n };\n return [\n {\n // PositiveX\n center: vec3.add(params.center, x),\n right: sign(vec3.negate(z)),\n up: y,\n color: colors[CubeFace.PositiveX],\n },\n {\n // PositiveY\n center: vec3.add(params.center, y),\n right: sign(x),\n up: vec3.negate(z),\n color: colors[CubeFace.PositiveY],\n },\n {\n // PositiveZ\n center: vec3.add(params.center, z),\n right: sign(x),\n up: y,\n color: colors[CubeFace.PositiveZ],\n },\n {\n // NegativeX\n center: vec3.sub(params.center, x),\n right: sign(z),\n up: y,\n color: colors[CubeFace.NegativeX],\n },\n {\n // NegativeY\n center: vec3.sub(params.center, y),\n right: sign(x),\n up: z,\n color: colors[CubeFace.NegativeY],\n },\n {\n // NegativeZ\n center: vec3.sub(params.center, z),\n right: sign(vec3.negate(x)),\n up: y,\n color: colors[CubeFace.NegativeZ],\n },\n ];\n}\n\nconst light: Quad = {\n center: vec3.fromValues(0, 9.95, 0),\n right: vec3.fromValues(1, 0, 0),\n up: vec3.fromValues(0, 0, 1),\n color: vec3.fromValues(5.0, 5.0, 5.0),\n emissive: 1.0,\n};\n\n/**\n * Scene holds the cornell-box scene information.\n */\nexport default class Scene {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n readonly vertexCount: number;\n readonly indexCount: number;\n readonly vertices: GPUBuffer;\n readonly indices: GPUBuffer;\n readonly vertexBufferLayout: GPUVertexBufferLayout[];\n readonly quadBuffer: GPUBuffer;\n readonly quads = [\n ...box({\n center: vec3.fromValues(0, 5, 0),\n width: 10,\n height: 10,\n depth: 10,\n rotation: 0,\n color: [\n vec3.fromValues(0.0, 0.5, 0.0), // PositiveX\n vec3.fromValues(0.5, 0.5, 0.5), // PositiveY\n vec3.fromValues(0.5, 0.5, 0.5), // PositiveZ\n vec3.fromValues(0.5, 0.0, 0.0), // NegativeX\n vec3.fromValues(0.5, 0.5, 0.5), // NegativeY\n vec3.fromValues(0.5, 0.5, 0.5), // NegativeZ\n ],\n type: 'concave',\n }),\n ...box({\n center: vec3.fromValues(1.5, 1.5, 1),\n width: 3,\n height: 3,\n depth: 3,\n rotation: 0.3,\n color: vec3.fromValues(0.8, 0.8, 0.8),\n type: 'convex',\n }),\n ...box({\n center: vec3.fromValues(-2, 3, -2),\n width: 3,\n height: 6,\n depth: 3,\n rotation: -0.4,\n color: vec3.fromValues(0.8, 0.8, 0.8),\n type: 'convex',\n }),\n light,\n ];\n readonly lightCenter = light.center;\n readonly lightWidth = vec3.len(light.right) * 2;\n readonly lightHeight = vec3.len(light.up) * 2;\n\n constructor(device: GPUDevice) {\n const quadStride = 16 * 4;\n const quadBuffer = device.createBuffer({\n size: quadStride * this.quads.length,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n const quadData = new Float32Array(quadBuffer.getMappedRange());\n const vertexStride = 4 * 10;\n const vertexData = new Float32Array(this.quads.length * vertexStride);\n const indexData = new Uint32Array(this.quads.length * 9); // TODO: 6?\n let vertexCount = 0;\n let indexCount = 0;\n let quadDataOffset = 0;\n let vertexDataOffset = 0;\n let indexDataOffset = 0;\n for (let quadIdx = 0; quadIdx < this.quads.length; quadIdx++) {\n const quad = this.quads[quadIdx];\n const normal = vec3.normalize(vec3.cross(quad.right, quad.up));\n quadData[quadDataOffset++] = normal[0];\n quadData[quadDataOffset++] = normal[1];\n quadData[quadDataOffset++] = normal[2];\n quadData[quadDataOffset++] = -vec3.dot(normal, quad.center);\n\n const invRight = reciprocal(quad.right);\n quadData[quadDataOffset++] = invRight[0];\n 
quadData[quadDataOffset++] = invRight[1];\n quadData[quadDataOffset++] = invRight[2];\n quadData[quadDataOffset++] = -vec3.dot(invRight, quad.center);\n\n const invUp = reciprocal(quad.up);\n quadData[quadDataOffset++] = invUp[0];\n quadData[quadDataOffset++] = invUp[1];\n quadData[quadDataOffset++] = invUp[2];\n quadData[quadDataOffset++] = -vec3.dot(invUp, quad.center);\n\n quadData[quadDataOffset++] = quad.color[0];\n quadData[quadDataOffset++] = quad.color[1];\n quadData[quadDataOffset++] = quad.color[2];\n quadData[quadDataOffset++] = quad.emissive ?? 0;\n\n // a ----- b\n // | |\n // | m |\n // | |\n // c ----- d\n const a = vec3.add(vec3.sub(quad.center, quad.right), quad.up);\n const b = vec3.add(vec3.add(quad.center, quad.right), quad.up);\n const c = vec3.sub(vec3.sub(quad.center, quad.right), quad.up);\n const d = vec3.sub(vec3.add(quad.center, quad.right), quad.up);\n\n vertexData[vertexDataOffset++] = a[0];\n vertexData[vertexDataOffset++] = a[1];\n vertexData[vertexDataOffset++] = a[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 0; // uv.x\n vertexData[vertexDataOffset++] = 1; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = b[0];\n vertexData[vertexDataOffset++] = b[1];\n vertexData[vertexDataOffset++] = b[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 1; // uv.x\n vertexData[vertexDataOffset++] = 1; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = c[0];\n vertexData[vertexDataOffset++] = c[1];\n vertexData[vertexDataOffset++] = c[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 0; // uv.x\n vertexData[vertexDataOffset++] = 0; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 0);\n\n vertexData[vertexDataOffset++] = d[0];\n vertexData[vertexDataOffset++] = d[1];\n vertexData[vertexDataOffset++] = d[2];\n vertexData[vertexDataOffset++] = 1;\n vertexData[vertexDataOffset++] = 1; // uv.x\n vertexData[vertexDataOffset++] = 0; // uv.y\n vertexData[vertexDataOffset++] = quadIdx;\n vertexData[vertexDataOffset++] = quad.color[0] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[1] * (quad.emissive ?? 0);\n vertexData[vertexDataOffset++] = quad.color[2] * (quad.emissive ?? 
0);\n\n indexData[indexDataOffset++] = vertexCount + 0; // a\n indexData[indexDataOffset++] = vertexCount + 2; // c\n indexData[indexDataOffset++] = vertexCount + 1; // b\n indexData[indexDataOffset++] = vertexCount + 1; // b\n indexData[indexDataOffset++] = vertexCount + 2; // c\n indexData[indexDataOffset++] = vertexCount + 3; // d\n indexCount += 6;\n vertexCount += 4;\n }\n\n quadBuffer.unmap();\n\n const vertices = device.createBuffer({\n size: vertexData.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(vertices.getMappedRange()).set(vertexData);\n vertices.unmap();\n\n const indices = device.createBuffer({\n size: indexData.byteLength,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n new Uint16Array(indices.getMappedRange()).set(indexData);\n indices.unmap();\n\n const vertexBufferLayout: GPUVertexBufferLayout[] = [\n {\n arrayStride: vertexStride,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0 * 4,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: 4 * 4,\n format: 'float32x3',\n },\n {\n // color\n shaderLocation: 2,\n offset: 7 * 4,\n format: 'float32x3',\n },\n ],\n },\n ];\n\n this.vertexCount = vertexCount;\n this.indexCount = indexCount;\n this.vertices = vertices;\n this.indices = indices;\n this.vertexBufferLayout = vertexBufferLayout;\n this.quadBuffer = quadBuffer;\n }\n}\n"};class g{update(e){let n=d._E.perspective(2*Math.PI/8,e.aspect,.5,100),t=e.rotateCamera?this.frame/1e3:0,a=d._E.lookAt(d.R3.fromValues(15*Math.sin(t),5,15*Math.cos(t)),d.R3.fromValues(0,5,0),d.R3.fromValues(0,1,0)),r=d._E.multiply(n,a),i=d._E.invert(r),o=new Float32Array(this.uniformBuffer.size/4),s=new Uint32Array(o.buffer);for(let u=0;u<16;u++)o[u]=r[u];for(let c=0;c<16;c++)o[c+16]=i[c];s[32]=4294967295*Math.random(),s[33]=4294967295*Math.random(),s[34]=4294967295*Math.random(),this.device.queue.writeBuffer(this.uniformBuffer,0,o.buffer,o.byteOffset,o.byteLength),this.frame++}constructor(e,n){this.wgsl=l,this.frame=0,this.device=e,this.uniformBuffer=e.createBuffer({label:"Common.uniformBuffer",size:144,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});let t=e.createBindGroupLayout({label:"Common.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.VERTEX|GPUShaderStage.COMPUTE,buffer:{type:"uniform"}},{binding:1,visibility:GPUShaderStage.COMPUTE,buffer:{type:"read-only-storage"}}]}),a=e.createBindGroup({label:"Common.bindGroup",layout:t,entries:[{binding:0,resource:{buffer:this.uniformBuffer,offset:0,size:this.uniformBuffer.size}},{binding:1,resource:{buffer:n,offset:0,size:n.size}}]});this.uniforms={bindGroupLayout:t,bindGroup:a}}}g.sourceInfo={name:"src/sample/cornell/common.ts".substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport commonWGSL from './common.wgsl';\n\n/**\n * Common holds the shared WGSL between the shaders, including the common uniform buffer.\n */\nexport default class Common {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n /** The WGSL of the common shader */\n readonly wgsl = commonWGSL;\n /** The common uniform buffer bind group and layout */\n readonly uniforms: {\n bindGroupLayout: GPUBindGroupLayout;\n bindGroup: GPUBindGroup;\n };\n\n private readonly device: GPUDevice;\n private readonly uniformBuffer: GPUBuffer;\n\n private frame = 0;\n\n constructor(device: GPUDevice, quads: GPUBuffer) {\n this.device = device;\n this.uniformBuffer = device.createBuffer({\n label: 
'Common.uniformBuffer',\n size:\n 0 + //\n 4 * 16 + // mvp\n 4 * 16 + // inv_mvp\n 4 * 4, // seed\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Common.bindGroupLayout',\n entries: [\n {\n // common_uniforms\n binding: 0,\n visibility: GPUShaderStage.VERTEX | GPUShaderStage.COMPUTE,\n buffer: { type: 'uniform' },\n },\n {\n // quads\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'read-only-storage' },\n },\n ],\n });\n\n const bindGroup = device.createBindGroup({\n label: 'Common.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // common_uniforms\n binding: 0,\n resource: {\n buffer: this.uniformBuffer,\n offset: 0,\n size: this.uniformBuffer.size,\n },\n },\n {\n // quads\n binding: 1,\n resource: {\n buffer: quads,\n offset: 0,\n size: quads.size,\n },\n },\n ],\n });\n\n this.uniforms = { bindGroupLayout, bindGroup };\n }\n\n /** Updates the uniform buffer data */\n update(params: { rotateCamera: boolean; aspect: number }) {\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 8,\n params.aspect,\n 0.5,\n 100\n );\n\n const viewRotation = params.rotateCamera ? this.frame / 1000 : 0;\n\n const viewMatrix = mat4.lookAt(\n vec3.fromValues(\n Math.sin(viewRotation) * 15,\n 5,\n Math.cos(viewRotation) * 15\n ),\n vec3.fromValues(0, 5, 0),\n vec3.fromValues(0, 1, 0)\n );\n const mvp = mat4.multiply(projectionMatrix, viewMatrix);\n const invMVP = mat4.invert(mvp);\n\n const uniformDataF32 = new Float32Array(this.uniformBuffer.size / 4);\n const uniformDataU32 = new Uint32Array(uniformDataF32.buffer);\n for (let i = 0; i < 16; i++) {\n uniformDataF32[i] = mvp[i];\n }\n for (let i = 0; i < 16; i++) {\n uniformDataF32[i + 16] = invMVP[i];\n }\n uniformDataU32[32] = 0xffffffff * Math.random();\n uniformDataU32[33] = 0xffffffff * Math.random();\n uniformDataU32[34] = 0xffffffff * Math.random();\n\n this.device.queue.writeBuffer(\n this.uniformBuffer,\n 0,\n uniformDataF32.buffer,\n uniformDataF32.byteOffset,\n uniformDataF32.byteLength\n );\n\n this.frame++;\n }\n}\n"};class v{run(e){this.accumulationMean+=this.kPhotonsPerFrame*this.kPhotonEnergy/this.kTotalLightmapTexels;let n=1/this.accumulationMean,t=this.accumulationMean>2*this.kAccumulationMeanMax?.5:1;this.accumulationMean*=t;let a=new Float32Array(this.uniformBuffer.size/4);a[0]=n,a[1]=t,a[2]=this.scene.lightWidth,a[3]=this.scene.lightHeight,a[4]=this.scene.lightCenter[0],a[5]=this.scene.lightCenter[1],a[6]=this.scene.lightCenter[2],this.device.queue.writeBuffer(this.uniformBuffer,0,a.buffer,a.byteOffset,a.byteLength);let 
r=e.beginComputePass();r.setBindGroup(0,this.common.uniforms.bindGroup),r.setBindGroup(1,this.bindGroup),r.setPipeline(this.radiosityPipeline),r.dispatchWorkgroups(this.kWorkgroupsPerFrame),r.setPipeline(this.accumulationToLightmapPipeline),r.dispatchWorkgroups(Math.ceil(v.lightmapWidth/this.kAccumulationToLightmapWorkgroupSizeX),Math.ceil(v.lightmapHeight/this.kAccumulationToLightmapWorkgroupSizeY),this.lightmap.depthOrArrayLayers),r.end()}constructor(e,n,t){this.kPhotonsPerWorkgroup=256,this.kWorkgroupsPerFrame=1024,this.kPhotonsPerFrame=this.kPhotonsPerWorkgroup*this.kWorkgroupsPerFrame,this.kPhotonEnergy=1e5,this.kAccumulationToLightmapWorkgroupSizeX=16,this.kAccumulationToLightmapWorkgroupSizeY=16,this.accumulationMean=0,this.kAccumulationMeanMax=268435456,this.device=e,this.common=n,this.scene=t,this.lightmap=e.createTexture({label:"Radiosity.lightmap",size:{width:v.lightmapWidth,height:v.lightmapHeight,depthOrArrayLayers:t.quads.length},format:v.lightmapFormat,usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.STORAGE_BINDING}),this.accumulationBuffer=e.createBuffer({label:"Radiosity.accumulationBuffer",size:v.lightmapWidth*v.lightmapHeight*t.quads.length*16,usage:GPUBufferUsage.STORAGE}),this.kTotalLightmapTexels=v.lightmapWidth*v.lightmapHeight*t.quads.length,this.uniformBuffer=e.createBuffer({label:"Radiosity.uniformBuffer",size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});let a=e.createBindGroupLayout({label:"Radiosity.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.COMPUTE,buffer:{type:"storage"}},{binding:1,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:v.lightmapFormat,viewDimension:"2d-array"}},{binding:2,visibility:GPUShaderStage.COMPUTE,buffer:{type:"uniform"}}]});this.bindGroup=e.createBindGroup({label:"Radiosity.bindGroup",layout:a,entries:[{binding:0,resource:{buffer:this.accumulationBuffer,size:this.accumulationBuffer.size}},{binding:1,resource:this.lightmap.createView()},{binding:2,resource:{buffer:this.uniformBuffer,size:this.uniformBuffer.size}}]});let r=e.createShaderModule({code:o+n.wgsl}),i=e.createPipelineLayout({label:"Radiosity.accumulatePipelineLayout",bindGroupLayouts:[n.uniforms.bindGroupLayout,a]});this.radiosityPipeline=e.createComputePipeline({label:"Radiosity.radiosityPipeline",layout:i,compute:{module:r,entryPoint:"radiosity",constants:{PhotonsPerWorkgroup:this.kPhotonsPerWorkgroup,PhotonEnergy:this.kPhotonEnergy}}}),this.accumulationToLightmapPipeline=e.createComputePipeline({label:"Radiosity.accumulationToLightmapPipeline",layout:i,compute:{module:r,entryPoint:"accumulation_to_lightmap",constants:{AccumulationToLightmapWorkgroupSizeX:this.kAccumulationToLightmapWorkgroupSizeX,AccumulationToLightmapWorkgroupSizeY:this.kAccumulationToLightmapWorkgroupSizeY}}})}}v.sourceInfo={name:"src/sample/cornell/radiosity.ts".substring(19),contents:"import Common from './common';\nimport radiosityWGSL from './radiosity.wgsl';\nimport Scene from './scene';\n\n/**\n * Radiosity computes lightmaps, calculated by software raytracing of light in\n * the scene.\n */\nexport default class Radiosity {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n // The output lightmap format and dimensions\n static readonly lightmapFormat = 'rgba16float';\n static readonly lightmapWidth = 256;\n static readonly lightmapHeight = 256;\n\n // The output lightmap.\n readonly lightmap: GPUTexture;\n\n // Number of photons emitted per workgroup.\n // This is equal to the 
workgroup size (one photon per invocation)\n private readonly kPhotonsPerWorkgroup = 256;\n // Number of radiosity workgroups dispatched per frame.\n private readonly kWorkgroupsPerFrame = 1024;\n private readonly kPhotonsPerFrame =\n this.kPhotonsPerWorkgroup * this.kWorkgroupsPerFrame;\n // Maximum value that can be added to the 'accumulation' buffer, per photon,\n // across all texels.\n private readonly kPhotonEnergy = 100000;\n // The total number of lightmap texels for all quads.\n private readonly kTotalLightmapTexels;\n\n private readonly kAccumulationToLightmapWorkgroupSizeX = 16;\n private readonly kAccumulationToLightmapWorkgroupSizeY = 16;\n\n private readonly device: GPUDevice;\n private readonly common: Common;\n private readonly scene: Scene;\n private readonly radiosityPipeline: GPUComputePipeline;\n private readonly accumulationToLightmapPipeline: GPUComputePipeline;\n private readonly bindGroup: GPUBindGroup;\n private readonly accumulationBuffer: GPUBuffer;\n private readonly uniformBuffer: GPUBuffer;\n\n // The 'accumulation' buffer average value\n private accumulationMean = 0;\n\n // The maximum value of 'accumulationAverage' before all values in\n // 'accumulation' are reduced to avoid integer overflows.\n private readonly kAccumulationMeanMax = 0x10000000;\n\n constructor(device: GPUDevice, common: Common, scene: Scene) {\n this.device = device;\n this.common = common;\n this.scene = scene;\n this.lightmap = device.createTexture({\n label: 'Radiosity.lightmap',\n size: {\n width: Radiosity.lightmapWidth,\n height: Radiosity.lightmapHeight,\n depthOrArrayLayers: scene.quads.length,\n },\n format: Radiosity.lightmapFormat,\n usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,\n });\n this.accumulationBuffer = device.createBuffer({\n label: 'Radiosity.accumulationBuffer',\n size:\n Radiosity.lightmapWidth *\n Radiosity.lightmapHeight *\n scene.quads.length *\n 16,\n usage: GPUBufferUsage.STORAGE,\n });\n this.kTotalLightmapTexels =\n Radiosity.lightmapWidth * Radiosity.lightmapHeight * scene.quads.length;\n this.uniformBuffer = device.createBuffer({\n label: 'Radiosity.uniformBuffer',\n size: 8 * 4, // 8 x f32\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Radiosity.bindGroupLayout',\n entries: [\n {\n // accumulation buffer\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'storage' },\n },\n {\n // lightmap\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: Radiosity.lightmapFormat,\n viewDimension: '2d-array',\n },\n },\n {\n // radiosity_uniforms\n binding: 2,\n visibility: GPUShaderStage.COMPUTE,\n buffer: { type: 'uniform' },\n },\n ],\n });\n this.bindGroup = device.createBindGroup({\n label: 'Radiosity.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // accumulation buffer\n binding: 0,\n resource: {\n buffer: this.accumulationBuffer,\n size: this.accumulationBuffer.size,\n },\n },\n {\n // lightmap\n binding: 1,\n resource: this.lightmap.createView(),\n },\n {\n // radiosity_uniforms\n binding: 2,\n resource: {\n buffer: this.uniformBuffer,\n size: this.uniformBuffer.size,\n },\n },\n ],\n });\n\n const mod = device.createShaderModule({\n code: radiosityWGSL + common.wgsl,\n });\n const pipelineLayout = device.createPipelineLayout({\n label: 'Radiosity.accumulatePipelineLayout',\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n });\n\n 
this.radiosityPipeline = device.createComputePipeline({\n label: 'Radiosity.radiosityPipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'radiosity',\n constants: {\n PhotonsPerWorkgroup: this.kPhotonsPerWorkgroup,\n PhotonEnergy: this.kPhotonEnergy,\n },\n },\n });\n\n this.accumulationToLightmapPipeline = device.createComputePipeline({\n label: 'Radiosity.accumulationToLightmapPipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'accumulation_to_lightmap',\n constants: {\n AccumulationToLightmapWorkgroupSizeX:\n this.kAccumulationToLightmapWorkgroupSizeX,\n AccumulationToLightmapWorkgroupSizeY:\n this.kAccumulationToLightmapWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n // Calculate the new mean value for the accumulation buffer\n this.accumulationMean +=\n (this.kPhotonsPerFrame * this.kPhotonEnergy) / this.kTotalLightmapTexels;\n\n // Calculate the 'accumulation' -> 'lightmap' scale factor from 'accumulationMean'\n const accumulationToLightmapScale = 1 / this.accumulationMean;\n // If 'accumulationMean' is greater than 'kAccumulationMeanMax', then reduce\n // the 'accumulation' buffer values to prevent u32 overflow.\n const accumulationBufferScale =\n this.accumulationMean > 2 * this.kAccumulationMeanMax ? 0.5 : 1;\n this.accumulationMean *= accumulationBufferScale;\n\n // Update the radiosity uniform buffer data.\n const uniformDataF32 = new Float32Array(this.uniformBuffer.size / 4);\n uniformDataF32[0] = accumulationToLightmapScale;\n uniformDataF32[1] = accumulationBufferScale;\n uniformDataF32[2] = this.scene.lightWidth;\n uniformDataF32[3] = this.scene.lightHeight;\n uniformDataF32[4] = this.scene.lightCenter[0];\n uniformDataF32[5] = this.scene.lightCenter[1];\n uniformDataF32[6] = this.scene.lightCenter[2];\n this.device.queue.writeBuffer(\n this.uniformBuffer,\n 0,\n uniformDataF32.buffer,\n uniformDataF32.byteOffset,\n uniformDataF32.byteLength\n );\n\n // Dispatch the radiosity workgroups\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.setPipeline(this.radiosityPipeline);\n passEncoder.dispatchWorkgroups(this.kWorkgroupsPerFrame);\n\n // Then copy the 'accumulation' data to 'lightmap'\n passEncoder.setPipeline(this.accumulationToLightmapPipeline);\n passEncoder.dispatchWorkgroups(\n Math.ceil(\n Radiosity.lightmapWidth / this.kAccumulationToLightmapWorkgroupSizeX\n ),\n Math.ceil(\n Radiosity.lightmapHeight / this.kAccumulationToLightmapWorkgroupSizeY\n ),\n this.lightmap.depthOrArrayLayers\n );\n passEncoder.end();\n }\n}\n"},v.lightmapFormat="rgba16float",v.lightmapWidth=256,v.lightmapHeight=256;class b{run(e){let n=e.beginRenderPass(this.renderPassDescriptor);n.setPipeline(this.pipeline),n.setVertexBuffer(0,this.scene.vertices),n.setIndexBuffer(this.scene.indices,"uint16"),n.setBindGroup(0,this.common.uniforms.bindGroup),n.setBindGroup(1,this.bindGroup),n.drawIndexed(this.scene.indexCount),n.end()}constructor(e,n,t,a,r){this.common=n,this.scene=t;let 
i=e.createTexture({label:"RasterizerRenderer.depthTexture",size:[r.width,r.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT});this.renderPassDescriptor={label:"RasterizerRenderer.renderPassDescriptor",colorAttachments:[{view:r.createView(),clearValue:[.1,.2,.3,1],loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:i.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}};let o=e.createBindGroupLayout({label:"RasterizerRenderer.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,texture:{viewDimension:"2d-array"}},{binding:1,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,sampler:{}}]});this.bindGroup=e.createBindGroup({label:"RasterizerRenderer.bindGroup",layout:o,entries:[{binding:0,resource:a.lightmap.createView()},{binding:1,resource:e.createSampler({addressModeU:"clamp-to-edge",addressModeV:"clamp-to-edge",magFilter:"linear",minFilter:"linear"})}]});let u=e.createShaderModule({label:"RasterizerRenderer.module",code:s+n.wgsl});this.pipeline=e.createRenderPipeline({label:"RasterizerRenderer.pipeline",layout:e.createPipelineLayout({bindGroupLayouts:[n.uniforms.bindGroupLayout,o]}),vertex:{module:u,entryPoint:"vs_main",buffers:t.vertexBufferLayout},fragment:{module:u,entryPoint:"fs_main",targets:[{format:r.format}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}})}}b.sourceInfo={name:"src/sample/cornell/rasterizer.ts".substring(19),contents:"import rasterizerWGSL from './rasterizer.wgsl';\n\nimport Common from './common';\nimport Radiosity from './radiosity';\nimport Scene from './scene';\n\n/**\n * Rasterizer renders the scene using a regular raserization graphics pipeline.\n */\nexport default class Rasterizer {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly common: Common;\n private readonly scene: Scene;\n private readonly renderPassDescriptor: GPURenderPassDescriptor;\n private readonly pipeline: GPURenderPipeline;\n private readonly bindGroup: GPUBindGroup;\n\n constructor(\n device: GPUDevice,\n common: Common,\n scene: Scene,\n radiosity: Radiosity,\n framebuffer: GPUTexture\n ) {\n this.common = common;\n this.scene = scene;\n\n const depthTexture = device.createTexture({\n label: 'RasterizerRenderer.depthTexture',\n size: [framebuffer.width, framebuffer.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n this.renderPassDescriptor = {\n label: 'RasterizerRenderer.renderPassDescriptor',\n colorAttachments: [\n {\n view: framebuffer.createView(),\n clearValue: [0.1, 0.2, 0.3, 1],\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'RasterizerRenderer.bindGroupLayout',\n entries: [\n {\n // lightmap\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n texture: { viewDimension: '2d-array' },\n },\n {\n // sampler\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n sampler: {},\n },\n ],\n });\n\n this.bindGroup = device.createBindGroup({\n label: 'RasterizerRenderer.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // lightmap\n binding: 0,\n resource: radiosity.lightmap.createView(),\n },\n {\n // sampler\n binding: 1,\n 
resource: device.createSampler({\n addressModeU: 'clamp-to-edge',\n addressModeV: 'clamp-to-edge',\n magFilter: 'linear',\n minFilter: 'linear',\n }),\n },\n ],\n });\n\n const mod = device.createShaderModule({\n label: 'RasterizerRenderer.module',\n code: rasterizerWGSL + common.wgsl,\n });\n\n this.pipeline = device.createRenderPipeline({\n label: 'RasterizerRenderer.pipeline',\n layout: device.createPipelineLayout({\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n }),\n vertex: {\n module: mod,\n entryPoint: 'vs_main',\n buffers: scene.vertexBufferLayout,\n },\n fragment: {\n module: mod,\n entryPoint: 'fs_main',\n targets: [{ format: framebuffer.format }],\n },\n primitive: {\n topology: 'triangle-list',\n cullMode: 'back',\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginRenderPass(\n this.renderPassDescriptor\n );\n passEncoder.setPipeline(this.pipeline);\n passEncoder.setVertexBuffer(0, this.scene.vertices);\n passEncoder.setIndexBuffer(this.scene.indices, 'uint16');\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.drawIndexed(this.scene.indexCount);\n passEncoder.end();\n }\n}\n"};class y{run(e){let n=e.beginComputePass();n.setBindGroup(0,this.bindGroup),n.setPipeline(this.pipeline),n.dispatchWorkgroups(Math.ceil(this.width/this.kWorkgroupSizeX),Math.ceil(this.height/this.kWorkgroupSizeY)),n.end()}constructor(e,n,t,a){this.kWorkgroupSizeX=16,this.kWorkgroupSizeY=16,this.width=t.width,this.height=t.height;let r=e.createBindGroupLayout({label:"Tonemapper.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.COMPUTE,texture:{viewDimension:"2d"}},{binding:1,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:a.format,viewDimension:"2d"}}]});this.bindGroup=e.createBindGroup({label:"Tonemapper.bindGroup",layout:r,entries:[{binding:0,resource:t.createView()},{binding:1,resource:a.createView()}]});let i=e.createShaderModule({code:c.replace("{OUTPUT_FORMAT}",a.format)+n.wgsl}),o=e.createPipelineLayout({label:"Tonemap.pipelineLayout",bindGroupLayouts:[r]});this.pipeline=e.createComputePipeline({label:"Tonemap.pipeline",layout:o,compute:{module:i,entryPoint:"main",constants:{WorkgroupSizeX:this.kWorkgroupSizeX,WorkgroupSizeY:this.kWorkgroupSizeY}}})}}y.sourceInfo={name:"src/sample/cornell/tonemapper.ts".substring(19),contents:"import Common from './common';\nimport tonemapperWGSL from './tonemapper.wgsl';\n\n/**\n * Tonemapper implements a tonemapper to convert a linear-light framebuffer to\n * a gamma-correct, tonemapped framebuffer used for presentation.\n */\nexport default class Tonemapper {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly bindGroup: GPUBindGroup;\n private readonly pipeline: GPUComputePipeline;\n private readonly width: number;\n private readonly height: number;\n private readonly kWorkgroupSizeX = 16;\n private readonly kWorkgroupSizeY = 16;\n\n constructor(\n device: GPUDevice,\n common: Common,\n input: GPUTexture,\n output: GPUTexture\n ) {\n this.width = input.width;\n this.height = input.height;\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Tonemapper.bindGroupLayout',\n entries: [\n {\n // input\n binding: 0,\n visibility: GPUShaderStage.COMPUTE,\n texture: {\n viewDimension: '2d',\n 
},\n },\n {\n // output\n binding: 1,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: output.format,\n viewDimension: '2d',\n },\n },\n ],\n });\n this.bindGroup = device.createBindGroup({\n label: 'Tonemapper.bindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n // input\n binding: 0,\n resource: input.createView(),\n },\n {\n // output\n binding: 1,\n resource: output.createView(),\n },\n ],\n });\n\n const mod = device.createShaderModule({\n code:\n tonemapperWGSL.replace('{OUTPUT_FORMAT}', output.format) + common.wgsl,\n });\n const pipelineLayout = device.createPipelineLayout({\n label: 'Tonemap.pipelineLayout',\n bindGroupLayouts: [bindGroupLayout],\n });\n\n this.pipeline = device.createComputePipeline({\n label: 'Tonemap.pipeline',\n layout: pipelineLayout,\n compute: {\n module: mod,\n entryPoint: 'main',\n constants: {\n WorkgroupSizeX: this.kWorkgroupSizeX,\n WorkgroupSizeY: this.kWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setBindGroup(0, this.bindGroup);\n passEncoder.setPipeline(this.pipeline);\n passEncoder.dispatchWorkgroups(\n Math.ceil(this.width / this.kWorkgroupSizeX),\n Math.ceil(this.height / this.kWorkgroupSizeY)\n );\n passEncoder.end();\n }\n}\n"};class x{run(e){let n=e.beginComputePass();n.setPipeline(this.pipeline),n.setBindGroup(0,this.common.uniforms.bindGroup),n.setBindGroup(1,this.bindGroup),n.dispatchWorkgroups(Math.ceil(this.framebuffer.width/this.kWorkgroupSizeX),Math.ceil(this.framebuffer.height/this.kWorkgroupSizeY)),n.end()}constructor(e,n,t,a){this.kWorkgroupSizeX=16,this.kWorkgroupSizeY=16,this.common=n,this.framebuffer=a;let r=e.createBindGroupLayout({label:"Raytracer.bindGroupLayout",entries:[{binding:0,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,texture:{viewDimension:"2d-array"}},{binding:1,visibility:GPUShaderStage.FRAGMENT|GPUShaderStage.COMPUTE,sampler:{}},{binding:2,visibility:GPUShaderStage.COMPUTE,storageTexture:{access:"write-only",format:a.format,viewDimension:"2d"}}]});this.bindGroup=e.createBindGroup({label:"rendererBindGroup",layout:r,entries:[{binding:0,resource:t.lightmap.createView()},{binding:1,resource:e.createSampler({addressModeU:"clamp-to-edge",addressModeV:"clamp-to-edge",addressModeW:"clamp-to-edge",magFilter:"linear",minFilter:"linear"})},{binding:2,resource:a.createView()}]}),this.pipeline=e.createComputePipeline({label:"raytracerPipeline",layout:e.createPipelineLayout({bindGroupLayouts:[n.uniforms.bindGroupLayout,r]}),compute:{module:e.createShaderModule({code:u+n.wgsl}),entryPoint:"main",constants:{WorkgroupSizeX:this.kWorkgroupSizeX,WorkgroupSizeY:this.kWorkgroupSizeY}}})}}x.sourceInfo={name:"src/sample/cornell/raytracer.ts".substring(19),contents:"import raytracerWGSL from './raytracer.wgsl';\n\nimport Common from './common';\nimport Radiosity from './radiosity';\n\n/**\n * Raytracer renders the scene using a software ray-tracing compute pipeline.\n */\nexport default class Raytracer {\n static sourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n };\n\n private readonly common: Common;\n private readonly framebuffer: GPUTexture;\n private readonly pipeline: GPUComputePipeline;\n private readonly bindGroup: GPUBindGroup;\n\n private readonly kWorkgroupSizeX = 16;\n private readonly kWorkgroupSizeY = 16;\n\n constructor(\n device: GPUDevice,\n common: Common,\n radiosity: Radiosity,\n framebuffer: GPUTexture\n ) {\n 
this.common = common;\n this.framebuffer = framebuffer;\n const bindGroupLayout = device.createBindGroupLayout({\n label: 'Raytracer.bindGroupLayout',\n entries: [\n {\n // lightmap\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n texture: { viewDimension: '2d-array' },\n },\n {\n // sampler\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n sampler: {},\n },\n {\n // framebuffer\n binding: 2,\n visibility: GPUShaderStage.COMPUTE,\n storageTexture: {\n access: 'write-only',\n format: framebuffer.format,\n viewDimension: '2d',\n },\n },\n ],\n });\n\n this.bindGroup = device.createBindGroup({\n label: 'rendererBindGroup',\n layout: bindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: radiosity.lightmap.createView(),\n },\n {\n binding: 1,\n resource: device.createSampler({\n addressModeU: 'clamp-to-edge',\n addressModeV: 'clamp-to-edge',\n addressModeW: 'clamp-to-edge',\n magFilter: 'linear',\n minFilter: 'linear',\n }),\n },\n {\n binding: 2,\n resource: framebuffer.createView(),\n },\n ],\n });\n\n this.pipeline = device.createComputePipeline({\n label: 'raytracerPipeline',\n layout: device.createPipelineLayout({\n bindGroupLayouts: [common.uniforms.bindGroupLayout, bindGroupLayout],\n }),\n compute: {\n module: device.createShaderModule({\n code: raytracerWGSL + common.wgsl,\n }),\n entryPoint: 'main',\n constants: {\n WorkgroupSizeX: this.kWorkgroupSizeX,\n WorkgroupSizeY: this.kWorkgroupSizeY,\n },\n },\n });\n }\n\n run(commandEncoder: GPUCommandEncoder) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(this.pipeline);\n passEncoder.setBindGroup(0, this.common.uniforms.bindGroup);\n passEncoder.setBindGroup(1, this.bindGroup);\n passEncoder.dispatchWorkgroups(\n Math.ceil(this.framebuffer.width / this.kWorkgroupSizeX),\n Math.ceil(this.framebuffer.height / this.kWorkgroupSizeY)\n );\n passEncoder.end();\n }\n}\n"};var P="src/sample/cornell/main.ts";let _=async e=>{let{canvas:n,pageState:t,gui:a}=e,r=navigator.gpu.getPreferredCanvasFormat(),i="bgra8unorm"===r?["bgra8unorm-storage"]:[],o=await navigator.gpu.requestAdapter();for(let s of i)if(!o.features.has(s))throw Error("sample requires ".concat(s,", but is not supported by the adapter"));let u=await o.requestDevice({requiredFeatures:i});if(!t.active)return;let c={renderer:"rasterizer",rotateCamera:!0};a.add(c,"renderer",["rasterizer","raytracer"]),a.add(c,"rotateCamera",!0);let l=window.devicePixelRatio;n.width=n.clientWidth*l,n.height=n.clientHeight*l;let d=n.getContext("webgpu");d.configure({device:u,format:r,usage:GPUTextureUsage.RENDER_ATTACHMENT|GPUTextureUsage.STORAGE_BINDING,alphaMode:"premultiplied"});let m=u.createTexture({label:"framebuffer",size:[n.width,n.height],format:"rgba16float",usage:GPUTextureUsage.RENDER_ATTACHMENT|GPUTextureUsage.STORAGE_BINDING|GPUTextureUsage.TEXTURE_BINDING}),f=new h(u),p=new g(u,f.quadBuffer),P=new v(u,p,f),_=new b(u,p,f,P,m),G=new x(u,p,P,m);requestAnimationFrame(function e(){if(!t.active)return;let a=d.getCurrentTexture(),r=u.createCommandEncoder();switch(p.update({rotateCamera:c.rotateCamera,aspect:n.width/n.height}),P.run(r),c.renderer){case"rasterizer":_.run(r);break;case"raytracer":G.run(r)}let i=new y(u,p,m,a);i.run(r),u.queue.submit([r.finish()]),requestAnimationFrame(e)})},G=()=>(0,i.Tl)({name:"Cornell box",description:"A classic Cornell box, using a lightmap generated using software ray-tracing.",gui:!0,init:_,sources:[{name:P.substring(19),contents:"import { makeSample, SampleInit 
} from '../../components/SampleLayout';\n\nimport radiosityWGSL from './radiosity.wgsl';\nimport rasterizerWGSL from './rasterizer.wgsl';\nimport raytracerWGSL from './raytracer.wgsl';\nimport tonemapperWGSL from './tonemapper.wgsl';\nimport commonWGSL from './common.wgsl';\nimport Scene from './scene';\nimport Common from './common';\nimport Radiosity from './radiosity';\nimport Rasterizer from './rasterizer';\nimport Tonemapper from './tonemapper';\nimport Raytracer from './raytracer';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n const requiredFeatures: GPUFeatureName[] =\n presentationFormat === 'bgra8unorm' ? ['bgra8unorm-storage'] : [];\n const adapter = await navigator.gpu.requestAdapter();\n for (const feature of requiredFeatures) {\n if (!adapter.features.has(feature)) {\n throw new Error(\n `sample requires ${feature}, but is not supported by the adapter`\n );\n }\n }\n const device = await adapter.requestDevice({ requiredFeatures });\n\n if (!pageState.active) return;\n\n const params: {\n renderer: 'rasterizer' | 'raytracer';\n rotateCamera: boolean;\n } = {\n renderer: 'rasterizer',\n rotateCamera: true,\n };\n\n gui.add(params, 'renderer', ['rasterizer', 'raytracer']);\n gui.add(params, 'rotateCamera', true);\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n context.configure({\n device,\n format: presentationFormat,\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.STORAGE_BINDING,\n alphaMode: 'premultiplied',\n });\n\n const framebuffer = device.createTexture({\n label: 'framebuffer',\n size: [canvas.width, canvas.height],\n format: 'rgba16float',\n usage:\n GPUTextureUsage.RENDER_ATTACHMENT |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const scene = new Scene(device);\n const common = new Common(device, scene.quadBuffer);\n const radiosity = new Radiosity(device, common, scene);\n const rasterizer = new Rasterizer(\n device,\n common,\n scene,\n radiosity,\n framebuffer\n );\n const raytracer = new Raytracer(device, common, radiosity, framebuffer);\n\n function frame() {\n if (!pageState.active) {\n // Sample is no longer the active page.\n return;\n }\n\n const canvasTexture = context.getCurrentTexture();\n const commandEncoder = device.createCommandEncoder();\n\n common.update({\n rotateCamera: params.rotateCamera,\n aspect: canvas.width / canvas.height,\n });\n radiosity.run(commandEncoder);\n\n switch (params.renderer) {\n case 'rasterizer': {\n rasterizer.run(commandEncoder);\n break;\n }\n case 'raytracer': {\n raytracer.run(commandEncoder);\n break;\n }\n }\n\n const tonemapper = new Tonemapper(\n device,\n common,\n framebuffer,\n canvasTexture\n );\n tonemapper.run(commandEncoder);\n\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n\n requestAnimationFrame(frame);\n};\n\nconst CornellBox: () => JSX.Element = () =>\n makeSample({\n name: 'Cornell box',\n description:\n 'A classic Cornell box, using a lightmap generated using software ray-tracing.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n Common.sourceInfo,\n Scene.sourceInfo,\n Radiosity.sourceInfo,\n Rasterizer.sourceInfo,\n Raytracer.sourceInfo,\n 
Tonemapper.sourceInfo,\n {\n name: './radiosity.wgsl',\n contents: radiosityWGSL,\n editable: true,\n },\n {\n name: './rasterizer.wgsl',\n contents: rasterizerWGSL,\n editable: true,\n },\n {\n name: './raytracer.wgsl',\n contents: raytracerWGSL,\n editable: true,\n },\n {\n name: './tonemapper.wgsl',\n contents: tonemapperWGSL,\n editable: true,\n },\n {\n name: './common.wgsl',\n contents: commonWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default CornellBox;\n"},g.sourceInfo,h.sourceInfo,v.sourceInfo,b.sourceInfo,x.sourceInfo,y.sourceInfo,{name:"./radiosity.wgsl",contents:o,editable:!0},{name:"./rasterizer.wgsl",contents:s,editable:!0},{name:"./raytracer.wgsl",contents:u,editable:!0},{name:"./tonemapper.wgsl",contents:c,editable:!0},{name:"./common.wgsl",contents:l,editable:!0}],filename:P});var S=G},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileScrollContainer:"SampleLayout_sourceFileScrollContainer__LsNEm",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/webpack-414175c7d7efd871.js b/_next/static/chunks/webpack-414175c7d7efd871.js deleted file mode 100644 index 20b7b3d9..00000000 --- a/_next/static/chunks/webpack-414175c7d7efd871.js +++ /dev/null @@ -1 +0,0 @@ -!function(){"use strict";var e,t,r,n,a,f,c,o,i,u,d={},b={};function l(e){var t=b[e];if(void 0!==t)return t.exports;var r=b[e]={exports:{}},n=!0;try{d[e].call(r.exports,r,r.exports,l),n=!1}finally{n&&delete b[e]}return r.exports}l.m=d,e=[],l.O=function(t,r,n,a){if(r){a=a||0;for(var f=e.length;f>0&&e[f-1][2]>a;f--)e[f]=e[f-1];e[f]=[r,n,a];return}for(var c=1/0,f=0;f=a&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(o=!1,a0&&e[f-1][2]>n;f--)e[f]=e[f-1];e[f]=[a,r,n];return}for(var c=1/0,f=0;f=n&&Object.keys(l.O).every(function(e){return l.O[e](a[i])})?a.splice(i--,1):(o=!1,nWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index 6f3e5bad..e7e839a9 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer - WebGPU Samples

A-Buffer

See it on Github!

Demonstrates order independent transparency using a per-pixel + limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).
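A minimal sketch of how the per-pixel linked-list storage described above could be allocated; this is illustrative only, not the sample's code, and the node layout, buffer names, and averageLayersPerPixel value are assumptions.

// Sketch: storage for a per-pixel linked list of translucent fragments.
// Memory is capped by sizing the fragment pool for an assumed average number
// of layers per pixel rather than for the worst case.
function createABufferStorage(
  device: GPUDevice,
  width: number,
  height: number,
  averageLayersPerPixel = 4 // assumption for illustration
) {
  const numPixels = width * height;
  const bytesPerNode = 16; // e.g. packed color + depth + next index (assumed layout)

  // One u32 per pixel holding the index of the first fragment in its list
  // (typically cleared to an end-of-list sentinel each frame).
  const heads = device.createBuffer({
    size: numPixels * 4,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
  });

  // Shared pool of fragment nodes that the fragment shader appends to.
  const fragmentPool = device.createBuffer({
    size: numPixels * averageLayersPerPixel * bytesPerNode,
    usage: GPUBufferUsage.STORAGE,
  });

  return { heads, fragmentPool };
}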

\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index 96ac7c3d..a6c95d90 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index e51e4b2e..eb2152da 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each dispatch of the bitonic sort shader dispatches a workgroup containing elements/2 invocations. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each dispatch of the bitonic sort shader launches a workgroup containing elements / 2 invocations. The GUI's Execution Information folder reports the sort's current state. The visualizer displays the sort's results as colored cells, sorted from brightest to darkest.
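A small sketch of the dispatch arithmetic mentioned above (elements / 2 compare-and-swap invocations per step); the function name and the workgroupSize default are illustrative, not the sample's.

// Sketch: each bitonic step performs one compare-and-swap per pair of elements,
// i.e. elements / 2 invocations, spread across workgroups of a fixed size.
function dispatchBitonicStep(
  pass: GPUComputePassEncoder,
  pipeline: GPUComputePipeline,
  bindGroup: GPUBindGroup,
  elements: number,
  workgroupSize = 256 // assumed; must match the shader's @workgroup_size
) {
  const invocations = elements / 2;
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.dispatchWorkgroups(Math.ceil(invocations / workgroupSize));
}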

\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index 486b8999..db89ce35 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index af4bea8f..fad09136 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index 25ea7d60..40a77401 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index fbeee96c..49cad9d8 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index bd17c00e..f13ed107 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu. + "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with WebGPU. The first pass renders geometry information to multiple targets (the G-buffers); in this sample there are two G-buffers, for normals and albedo, along with a depth texture. The lighting is then computed in a second pass from per-fragment data read out of the G-buffers, so its cost is independent of scene complexity. @@ -24,4 +24,4 @@ We also update the light positions in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G-buffer in the middle, and the albedo G-buffer on the right side of the screen. -

\ No newline at end of file +
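A sketch of the two G-buffer targets plus depth texture that the description above refers to; the formats chosen here are common picks, not necessarily the ones the sample uses.

// Sketch: allocate the render targets for the geometry pass. The lighting pass
// later binds these as textures, so TEXTURE_BINDING is included in the usage.
function createGBuffers(device: GPUDevice, width: number, height: number) {
  const usage =
    GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING;
  return {
    normal: device.createTexture({ size: [width, height], format: 'rgba16float', usage }),
    albedo: device.createTexture({ size: [width, height], format: 'bgra8unorm', usage }),
    depth: device.createTexture({ size: [width, height], format: 'depth24plus', usage }),
  };
}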

\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index f6884b46..e3fad969 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index b5d1159d..e6cf15f9 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index 63e9c7fd..f4f58502 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 9ae56c72..ebee6828 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index dab0b07a..c2153d7d 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 69fcf597..f359f9cc 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index 2f8656cb..a4567ced 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index dcddf02a..de12a803 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 28195a52..b0f77353 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of + of instancing to reduce draw overhead.)"/>

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene in order to demonstrate the reduction in JavaScript time spent to issue render commands. (Typically a scene like this would make use - of instancing to reduce draw overhead.)

\ No newline at end of file + of instancing to reduce draw overhead.)
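A minimal sketch of the render-bundle pattern described above: record the per-mesh commands once, then replay them each frame with a single call. The drawScene callback is a placeholder for the per-mesh setPipeline/setBindGroup/draw loop, not the sample's actual code.

// Sketch: record once, execute every frame.
function recordSceneBundle(
  device: GPUDevice,
  colorFormat: GPUTextureFormat,
  drawScene: (enc: GPURenderBundleEncoder) => void
): GPURenderBundle {
  const encoder = device.createRenderBundleEncoder({
    colorFormats: [colorFormat],
    depthStencilFormat: 'depth24plus',
  });
  drawScene(encoder); // setPipeline / setVertexBuffer / setBindGroup / draw per mesh
  return encoder.finish();
}

// Per frame, inside a render pass whose attachment formats match the bundle's:
//   passEncoder.executeBundles([bundle]);
// This replays all recorded draws with minimal JavaScript overhead.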

\ No newline at end of file diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html index 3c6ddca0..568fe53d 100644 --- a/samples/resizeCanvas.html +++ b/samples/resizeCanvas.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Resize Canvas - WebGPU Samples \ No newline at end of file + Resize Canvas - WebGPU Samples \ No newline at end of file diff --git a/samples/reversedZ.html b/samples/reversedZ.html index 48c3584b..c8dd9e0a 100644 --- a/samples/reversedZ.html +++ b/samples/reversedZ.html @@ -17,7 +17,7 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://web.archive.org/web/20220724174000/https://thxforthefish.com/posts/reverse_z/ - "/>

Reversed Z

See it on Github!

This example shows the use of reversed z technique for better utilization of depth buffer precision. + "/>

Reversed Z

See it on Github!

This example shows the use of the reversed-z technique for better utilization of depth buffer precision. The left column uses the regular method, while the right one uses the reversed-z technique. Both use depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other; higher sets are placed further from the camera (and are scaled up for visual clarity). @@ -26,4 +26,4 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://web.archive.org/web/20220724174000/https://thxforthefish.com/posts/reverse_z/ -

\ No newline at end of file +
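A sketch of the reversed-z setup described above (illustrative, not the sample's code): the depth test is flipped to 'greater' and depth clears to 0, with the projection remapped so the near plane lands at depth 1.

// Sketch: reversed-z pipeline and depth-attachment settings.
const reversedZDepthStencil: GPUDepthStencilState = {
  format: 'depth32float',
  depthWriteEnabled: true,
  depthCompare: 'greater', // nearer fragments now have larger depth values
};

function reversedZDepthAttachment(
  view: GPUTextureView
): GPURenderPassDepthStencilAttachment {
  return {
    view,
    depthClearValue: 0.0, // the far plane maps to 0 under the reversed projection
    depthLoadOp: 'clear',
    depthStoreOp: 'store',
  };
}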

\ No newline at end of file diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html index 2215a197..09a45ce0 100644 --- a/samples/rotatingCube.html +++ b/samples/rotatingCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Rotating Cube - WebGPU Samples \ No newline at end of file + Rotating Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html index 1b437830..2eaa1523 100644 --- a/samples/samplerParameters.html +++ b/samples/samplerParameters.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file + Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).
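An illustrative sampler showing the kind of parameters the page above lets you tweak; these particular values are arbitrary, not the sample's defaults.

// Sketch: a sampler exercising the main GPUSamplerDescriptor parameters.
function makeDemoSampler(device: GPUDevice): GPUSampler {
  return device.createSampler({
    addressModeU: 'repeat',
    addressModeV: 'repeat',
    magFilter: 'linear',
    minFilter: 'linear',
    mipmapFilter: 'linear', // trilinear filtering across the 16/8/4/2 mip chain
    lodMinClamp: 0,
    lodMaxClamp: 4,
    maxAnisotropy: 1, // values > 1 require all three filters to be 'linear'
  });
}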

\ No newline at end of file diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html index a4d0cc10..7ef748e1 100644 --- a/samples/shadowMapping.html +++ b/samples/shadowMapping.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Shadow Mapping - WebGPU Samples \ No newline at end of file + Shadow Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/texturedCube.html b/samples/texturedCube.html index 26aca4a2..13da947d 100644 --- a/samples/texturedCube.html +++ b/samples/texturedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Textured Cube - WebGPU Samples \ No newline at end of file + Textured Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/twoCubes.html b/samples/twoCubes.html index 7e2f9ee0..fc837a68 100644 --- a/samples/twoCubes.html +++ b/samples/twoCubes.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Two Cubes - WebGPU Samples \ No newline at end of file + Two Cubes - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploading.html b/samples/videoUploading.html index d2a50ec3..1d33f0e5 100644 --- a/samples/videoUploading.html +++ b/samples/videoUploading.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading - WebGPU Samples \ No newline at end of file + Video Uploading - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html index 074de4fc..fdc6d550 100644 --- a/samples/videoUploadingWebCodecs.html +++ b/samples/videoUploadingWebCodecs.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file + Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file diff --git a/samples/worker.html b/samples/worker.html index d362d1ce..f20cb390 100644 --- a/samples/worker.html +++ b/samples/worker.html @@ -10,6 +10,6 @@ } WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on Github!

This example shows one method of using WebGPU in a web worker and presenting to + which is then transferred to the worker where all the WebGPU calls are made."/>

\ No newline at end of file + which is then transferred to the worker where all the WebGPU calls are made.

\ No newline at end of file
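A sketch of the main-thread side of the OffscreenCanvas handoff described above; 'worker.js' and the message shape are placeholders, not the sample's actual file or protocol.

// Sketch: transfer rendering control of a canvas to a worker, where all WebGPU
// calls (adapter/device setup, context configuration, rendering) then happen.
const canvas = document.querySelector('canvas') as HTMLCanvasElement;
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('worker.js', { type: 'module' });
// The OffscreenCanvas is transferred, not copied; it becomes unusable on this thread.
worker.postMessage({ type: 'init', canvas: offscreen }, [offscreen]);

// Inside the worker (a separate file), navigator.gpu is available; the worker
// calls offscreen.getContext('webgpu') and renders exactly as on the main thread.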