From 285b0e4e25e76756216e9724cc016cbbab883f75 Mon Sep 17 00:00:00 2001
From: shrekshao <5031596+shrekshao@users.noreply.github.com>
Date: Thu, 23 Nov 2023 19:45:28 +0000
Subject: [PATCH] Deploying to gh-pages from  @ 2d193cb2764c736f435ce748630fae5f29f7518d 🚀
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 404.html | 4 ++--
 .../samples/A-buffer.json | 0
 .../samples/animometer.json | 0
 .../samples/bitonicSort.json | 0
 .../samples/cameras.json | 0
 .../samples/computeBoids.json | 0
 .../samples/cornell.json | 0
 .../samples/cubemap.json | 0
 .../samples/deferredRendering.json | 0
 .../samples/fractalCube.json | 0
 .../samples/gameOfLife.json | 0
 .../samples/helloTriangle.json | 0
 .../samples/helloTriangleMSAA.json | 0
 .../samples/imageBlur.json | 0
 .../samples/instancedCube.json | 0
 .../samples/normalMap.json | 0
 .../samples/particles.json | 0
 .../samples/renderBundles.json | 0
 .../samples/resizeCanvas.json | 0
 .../samples/reversedZ.json | 0
 .../samples/rotatingCube.json | 0
 .../samples/samplerParameters.json | 0
 .../samples/shadowMapping.json | 0
 .../samples/texturedCube.json | 0
 .../samples/twoCubes.json | 0
 .../samples/videoUploading.json | 0
 .../samples/videoUploadingWebCodecs.json | 0
 .../samples/worker.json | 0
 _next/static/chunks/704.31a64194a8c6952d.js | 1 +
 _next/static/chunks/704.3b4b79ac2db1162c.js | 1 -
 ...ebpack-2fbd1f7b2788aa97.js => webpack-af8ade16c1360358.js} | 2 +-
 .../_buildManifest.js | 0
 .../_ssgManifest.js | 0
 index.html | 2 +-
 samples/A-buffer.html | 4 ++--
 samples/animometer.html | 2 +-
 samples/bitonicSort.html | 2 +-
 samples/cameras.html | 2 +-
 samples/computeBoids.html | 2 +-
 samples/cornell.html | 2 +-
 samples/cubemap.html | 2 +-
 samples/deferredRendering.html | 4 ++--
 samples/fractalCube.html | 2 +-
 samples/gameOfLife.html | 2 +-
 samples/helloTriangle.html | 2 +-
 samples/helloTriangleMSAA.html | 2 +-
 samples/imageBlur.html | 2 +-
 samples/instancedCube.html | 2 +-
 samples/normalMap.html | 2 +-
 samples/particles.html | 2 +-
 samples/renderBundles.html | 4 ++--
 samples/resizeCanvas.html | 2 +-
 samples/reversedZ.html | 4 ++--
 samples/rotatingCube.html | 2 +-
 samples/samplerParameters.html | 2 +-
 samples/shadowMapping.html | 2 +-
 samples/texturedCube.html | 2 +-
 samples/twoCubes.html | 2 +-
 samples/videoUploading.html | 2 +-
 samples/videoUploadingWebCodecs.html | 2 +-
 samples/worker.html | 4 ++--
 61 files changed, 37 insertions(+), 37 deletions(-)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/A-buffer.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/animometer.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/bitonicSort.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/cameras.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/computeBoids.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/cornell.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/cubemap.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/deferredRendering.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/fractalCube.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/gameOfLife.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/helloTriangle.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/helloTriangleMSAA.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/imageBlur.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/instancedCube.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/normalMap.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/particles.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/renderBundles.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/resizeCanvas.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/reversedZ.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/rotatingCube.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/samplerParameters.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/shadowMapping.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/texturedCube.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/twoCubes.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/videoUploading.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/videoUploadingWebCodecs.json (100%)
 rename _next/data/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/samples/worker.json (100%)
 create mode 100644 _next/static/chunks/704.31a64194a8c6952d.js
 delete mode 100644 _next/static/chunks/704.3b4b79ac2db1162c.js
 rename _next/static/chunks/{webpack-2fbd1f7b2788aa97.js => webpack-af8ade16c1360358.js} (98%)
 rename _next/static/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/_buildManifest.js (100%)
 rename _next/static/{mnm4syQEmfDvLZunXl9f2 => ui8IQ4Jl_-GiewJ4GZre6}/_ssgManifest.js (100%)

diff --git a/404.html b/404.html
index 9d7184b6..63f25ddf 100644
--- a/404.html
+++ b/404.html
@@ -1,4 +1,4 @@
-404: This page could not be found
[404.html diff body garbled in extraction: each side of the diff is a single line of minified HTML, and only its rendered text survives; both the removed and the added page read "404" and "This page could not be found." The markup itself is not recoverable here.]
\ No newline at end of file
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/A-buffer.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/A-buffer.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/A-buffer.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/A-buffer.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/animometer.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/animometer.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/animometer.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/animometer.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/bitonicSort.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/bitonicSort.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/bitonicSort.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/bitonicSort.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/cameras.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cameras.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/cameras.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cameras.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/computeBoids.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/computeBoids.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/computeBoids.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/computeBoids.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/cornell.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cornell.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/cornell.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cornell.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/cubemap.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cubemap.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/cubemap.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/cubemap.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/deferredRendering.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/deferredRendering.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/deferredRendering.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/deferredRendering.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/fractalCube.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/fractalCube.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/fractalCube.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/fractalCube.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/gameOfLife.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/gameOfLife.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/gameOfLife.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/gameOfLife.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/helloTriangle.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangle.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/helloTriangle.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangle.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/helloTriangleMSAA.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangleMSAA.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/helloTriangleMSAA.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/helloTriangleMSAA.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/imageBlur.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/imageBlur.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/imageBlur.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/imageBlur.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/instancedCube.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/instancedCube.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/instancedCube.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/instancedCube.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/normalMap.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/normalMap.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/normalMap.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/normalMap.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/particles.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/particles.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/particles.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/particles.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/renderBundles.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/renderBundles.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/renderBundles.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/renderBundles.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/resizeCanvas.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/resizeCanvas.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/resizeCanvas.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/resizeCanvas.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/reversedZ.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/reversedZ.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/reversedZ.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/reversedZ.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/rotatingCube.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/rotatingCube.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/rotatingCube.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/rotatingCube.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/samplerParameters.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/samplerParameters.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/samplerParameters.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/samplerParameters.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/shadowMapping.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/shadowMapping.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/shadowMapping.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/shadowMapping.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/texturedCube.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/texturedCube.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/texturedCube.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/texturedCube.json
diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/twoCubes.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/twoCubes.json
similarity index 100%
rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/twoCubes.json
rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/twoCubes.json
diff --git
a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/videoUploading.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploading.json similarity index 100% rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/videoUploading.json rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploading.json diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/videoUploadingWebCodecs.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploadingWebCodecs.json similarity index 100% rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/videoUploadingWebCodecs.json rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/videoUploadingWebCodecs.json diff --git a/_next/data/mnm4syQEmfDvLZunXl9f2/samples/worker.json b/_next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/worker.json similarity index 100% rename from _next/data/mnm4syQEmfDvLZunXl9f2/samples/worker.json rename to _next/data/ui8IQ4Jl_-GiewJ4GZre6/samples/worker.json diff --git a/_next/static/chunks/704.31a64194a8c6952d.js b/_next/static/chunks/704.31a64194a8c6952d.js new file mode 100644 index 00000000..2423cfeb --- /dev/null +++ b/_next/static/chunks/704.31a64194a8c6952d.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[704],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return c}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),s=t(7294),u=t(9147),f=t.n(u);t(7319);let d=e=>{let n=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),u=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),g=(0,o.useRouter)(),m=g.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,h]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(i[0].name),d&&u.current)for(u.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();c&&l.current&&(c.dom.style.position="absolute",c.showPanel(1),l.current.appendChild(c.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:d,stats:c});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
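/* Editor's aside, not part of the deployed bundle: the harness code above
   hands each sample a `pageState` object and flips `pageState.active` to
   false when the page unmounts; the sample's render loop polls that flag to
   stop itself instead of being torn down explicitly. A minimal TypeScript
   sketch of the pattern, with illustrative names: */
interface PageState {
  active: boolean;
}

function mountSample(init: (state: PageState) => void): () => void {
  const pageState: PageState = { active: true };
  init(pageState);
  // The returned cleanup runs when the page unmounts (useEffect-style).
  return () => {
    pageState.active = false;
  };
}

mountSample((pageState) => {
  function frame() {
    if (!pageState.active) return; // sample is no longer the active page
    // ... encode and submit WebGPU commands here ...
    requestAnimationFrame(frame);
  }
  requestAnimationFrame(frame);
});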
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:f().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:f().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:f().sourceFileContainer,"data-active":v==e.name},n))]})]})},l=e=>(0,r.jsx)(d,{...e});function c(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return a}});var r=t(6906),i=t(9385);let a={positions:r.m,triangles:r.g,normals:[],uvs:[]};a.normals=(0,i.b)(a.positions,a.triangles),a.uvs=(0,i.q)(a.positions,"xy"),a.triangles.push([a.positions.length,a.positions.length+2,a.positions.length+1],[a.positions.length,a.positions.length+1,a.positions.length+3]),a.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),a.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),a.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return i},q:function(){return o}});var r=t(6416);function i(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[i,a,o]=n,s=e[i],u=e[a],f=e[o],d=r.R3.subtract(u,s),l=r.R3.subtract(f,s);r.R3.normalize(d,d),r.R3.normalize(l,l);let c=r.R3.cross(d,l);r.R3.add(t[i],c,t[i]),r.R3.add(t[a],c,t[a]),r.R3.add(t[o],c,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let a={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=a[n],r=e.map(()=>[0,0]),i=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],i[0]=Math.min(e[t[0]],i[0]),i[1]=Math.min(e[t[1]],i[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-i[0])/(o[0]-i[0]),e[1]=(e[1]-i[1])/(o[1]-i[1])}),r}},9704:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return v}});var r=t(5671),i=t(6416),a=t(6888),o="struct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(0) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\n@group(0) @binding(1) var config: Config;\n\nstruct LightExtent {\n min : vec4,\n max : vec4,\n}\n@group(0) @binding(2) var lightExtent: LightExtent;\n\n@compute @workgroup_size(64, 1, 1)\nfn main(@builtin(global_invocation_id) GlobalInvocationID : vec3) {\n var index = GlobalInvocationID.x;\n if (index >= config.numLights) {\n return;\n }\n\n lightsBuffer.lights[index].position.y = lightsBuffer.lights[index].position.y - 0.5 - 0.003 * (f32(index) - 64.0 * floor(f32(index) / 64.0));\n\n if (lightsBuffer.lights[index].position.y < lightExtent.min.y) {\n lightsBuffer.lights[index].position.y = lightExtent.max.y;\n }\n}\n",s="struct Uniforms {\n modelMatrix : mat4x4,\n normalModelMatrix : mat4x4,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n@group(0) @binding(1) var camera : Camera;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragNormal: vec3, // normal in world space\n @location(1) fragUV: vec2,\n}\n\n@vertex\nfn main(\n @location(0) position : vec3,\n @location(1) normal : vec3,\n @location(2) uv : 
vec2\n) -> VertexOutput {\n var output : VertexOutput;\n let worldPosition = (uniforms.modelMatrix * vec4(position, 1.0)).xyz;\n output.Position = camera.viewProjectionMatrix * vec4(worldPosition, 1.0);\n output.fragNormal = normalize((uniforms.normalModelMatrix * vec4(normal, 1.0)).xyz);\n output.fragUV = uv;\n return output;\n}\n",u="struct GBufferOutput {\n @location(0) normal : vec4,\n\n // Textures: diffuse color, specular color, smoothness, emissive etc. could go here\n @location(1) albedo : vec4,\n}\n\n@fragment\nfn main(\n @location(0) fragNormal: vec3,\n @location(1) fragUV : vec2\n) -> GBufferOutput {\n // faking some kind of checkerboard texture\n let uv = floor(30.0 * fragUV);\n let c = 0.2 + 0.5 * ((uv.x + uv.y) - 2.0 * floor((uv.x + uv.y) / 2.0));\n\n var output : GBufferOutput;\n output.normal = vec4(fragNormal, 1.0);\n output.albedo = vec4(c, c, c, 1.0);\n\n return output;\n}\n",f="@vertex\nfn main(\n @builtin(vertex_index) VertexIndex : u32\n) -> @builtin(position) vec4 {\n const pos = array(\n vec2(-1.0, -1.0), vec2(1.0, -1.0), vec2(-1.0, 1.0),\n vec2(-1.0, 1.0), vec2(1.0, -1.0), vec2(1.0, 1.0),\n );\n\n return vec4(pos[VertexIndex], 0.0, 1.0);\n}\n",d="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\noverride canvasSizeWidth: f32;\noverride canvasSizeHeight: f32;\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec4;\n let c = coord.xy / vec2(canvasSizeWidth, canvasSizeHeight);\n if (c.x < 0.33333) {\n let rawDepth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n // remap depth into something a bit more visible\n let depth = (1.0 - rawDepth) * 50.0;\n result = vec4(depth);\n } else if (c.x < 0.66667) {\n result = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n );\n result.x = (result.x + 1.0) * 0.5;\n result.y = (result.y + 1.0) * 0.5;\n result.z = (result.z + 1.0) * 0.5;\n } else {\n result = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n );\n }\n return result;\n}\n",l="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\nstruct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(1) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(1) @binding(1) var config: Config;\n@group(1) @binding(2) var camera: Camera;\n\nfn world_from_screen_coord(coord : vec2, depth_sample: f32) -> vec3 {\n // reconstruct world-space position from the screen coordinate.\n let posClip = vec4(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);\n let posWorldW = camera.invViewProjectionMatrix * posClip;\n let posWorld = posWorldW.xyz / posWorldW.www;\n return posWorld;\n}\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec3;\n\n let depth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n\n // Don't light the sky.\n if (depth >= 1.0) {\n discard;\n }\n\n let bufferSize = textureDimensions(gBufferDepth);\n let coordUV = coord.xy / vec2(bufferSize);\n let position = world_from_screen_coord(coordUV, depth);\n\n let normal = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n ).xyz;\n\n 
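/* Editor's sketch (hypothetical helper, not in the bundle): the CPU-side
   equivalent of the shader's world_from_screen_coord() above. It maps a
   screen UV in [0, 1] plus a raw depth-buffer sample back to world space
   through the inverse view-projection matrix, assuming wgpu-matrix's
   column-major Float32Array layout (translation in elements 12..14). */
function worldFromScreenCoord(
  invViewProj: Float32Array,
  u: number,
  v: number,
  depth: number
): [number, number, number] {
  // Clip-space position: x, y in [-1, 1] with y flipped, z is the depth sample.
  const clip = [u * 2 - 1, (1 - v) * 2 - 1, depth, 1];
  const out = [0, 0, 0, 0];
  for (let row = 0; row < 4; ++row) {
    for (let col = 0; col < 4; ++col) {
      out[row] += invViewProj[col * 4 + row] * clip[col];
    }
  }
  // Perspective divide recovers the world-space position.
  return [out[0] / out[3], out[1] / out[3], out[2] / out[3]];
}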
let albedo = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n ).rgb;\n\n for (var i = 0u; i < config.numLights; i++) {\n let L = lightsBuffer.lights[i].position.xyz - position;\n let distance = length(L);\n if (distance > lightsBuffer.lights[i].radius) {\n continue;\n }\n let lambert = max(dot(normal, normalize(L)), 0.0);\n result += vec3(\n lambert * pow(1.0 - distance / lightsBuffer.lights[i].radius, 2.0) * lightsBuffer.lights[i].color * albedo\n );\n }\n\n // some manual ambient\n result += vec3(0.2);\n\n return vec4(result, 1.0);\n}\n",c="src/sample/deferredRendering/main.ts";let g=i.R3.fromValues(-50,-30,-50),m=i.R3.fromValues(50,50,50),p=async e=>{let{canvas:n,pageState:t,gui:r}=e,c=await navigator.gpu.requestAdapter(),p=await c.requestDevice();if(!t.active)return;let h=n.getContext("webgpu"),v=window.devicePixelRatio;n.width=n.clientWidth*v,n.height=n.clientHeight*v;let x=n.width/n.height,b=navigator.gpu.getPreferredCanvasFormat();h.configure({device:p,format:b,alphaMode:"premultiplied"});let B=p.createBuffer({size:8*a.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let P=new Float32Array(B.getMappedRange());for(let y=0;y{let e=p.createBuffer({size:Uint32Array.BYTES_PER_ELEMENT,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});return new Uint32Array(e.getMappedRange())[0]=z.numLights,e.unmap(),e})();r.add(z,"mode",["rendering","gBuffers view"]),r.add(z,"numLights",1,1024).step(1).onChange(()=>{p.queue.writeBuffer(I,0,new Uint32Array([z.numLights]))});let W=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),j=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),k=p.createBindGroup({layout:V.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:W}},{binding:1,resource:{buffer:j}}]}),q=p.createBindGroup({layout:D,entries:[{binding:0,resource:S[0]},{binding:1,resource:S[1]},{binding:2,resource:S[2]}]}),Y=i.R3.sub(m,g),H=8192*Float32Array.BYTES_PER_ELEMENT,Q=p.createBuffer({size:H,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),X=new Float32Array(Q.getMappedRange()),J=i.vh.create(),Z=0;for(let K=0;K<1024;K++){Z=8*K;for(let $=0;$<3;$++)J[$]=Math.random()*Y[$]+g[$];J[3]=1,X.set(J,Z),J[0]=2*Math.random(),J[1]=2*Math.random(),J[2]=2*Math.random(),J[3]=20,X.set(J,Z+4)}Q.unmap();let ee=p.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),en=new Float32Array(8);en.set(g,0),en.set(m,4),p.queue.writeBuffer(ee,0,en.buffer,en.byteOffset,en.byteLength);let et=p.createComputePipeline({layout:"auto",compute:{module:p.createShaderModule({code:o}),entryPoint:"main"}}),er=p.createBindGroup({layout:A,entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:j}}]}),ei=p.createBindGroup({layout:et.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:ee}}]}),ea=i.R3.fromValues(0,50,-100),eo=i.R3.fromValues(0,1,0),es=i.R3.fromValues(0,0,0),eu=i._E.perspective(2*Math.PI/5,x,1,2e3),ef=i._E.translation([0,-45,0]);p.queue.writeBuffer(W,0,ef.buffer,ef.byteOffset,ef.byteLength);let ed=i._E.invert(ef);i._E.transpose(ed,ed),p.queue.writeBuffer(W,64,ed.buffer,ed.byteOffset,ed.byteLength),requestAnimationFrame(function e(){if(!t.active)return;let n=function(){let e=Math.PI*(Date.now()/5e3),n=i._E.rotateY(i._E.translation(es),e),t=i.R3.transformMat4(ea,n),r=i._E.lookAt(t,es,eo);return 
i._E.multiply(eu,r)}();p.queue.writeBuffer(j,0,n.buffer,n.byteOffset,n.byteLength);let r=i._E.invert(n);p.queue.writeBuffer(j,64,r.buffer,r.byteOffset,r.byteLength);let a=p.createCommandEncoder();{let o=a.beginRenderPass(F);o.setPipeline(V),o.setBindGroup(0,k),o.setVertexBuffer(0,B),o.setIndexBuffer(E,"uint16"),o.drawIndexed(w),o.end()}{let s=a.beginComputePass();s.setPipeline(et),s.setBindGroup(0,ei),s.dispatchWorkgroups(Math.ceil(16)),s.end()}if("gBuffers view"===z.mode){O.colorAttachments[0].view=h.getCurrentTexture().createView();let u=a.beginRenderPass(O);u.setPipeline(C),u.setBindGroup(0,q),u.draw(6),u.end()}else{O.colorAttachments[0].view=h.getCurrentTexture().createView();let f=a.beginRenderPass(O);f.setPipeline(N),f.setBindGroup(0,q),f.setBindGroup(1,er),f.draw(6),f.end()}p.queue.submit([a.finish()]),requestAnimationFrame(e)})},h=()=>(0,r.Tl)({name:"Deferred Rendering",description:"This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n ",gui:!0,init:p,sources:[{name:c.substring(29),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { mat4, vec3, vec4 } from 'wgpu-matrix';\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport lightUpdate from './lightUpdate.wgsl';\nimport vertexWriteGBuffers from './vertexWriteGBuffers.wgsl';\nimport fragmentWriteGBuffers from './fragmentWriteGBuffers.wgsl';\nimport vertexTextureQuad from './vertexTextureQuad.wgsl';\nimport fragmentGBuffersDebugView from './fragmentGBuffersDebugView.wgsl';\nimport fragmentDeferredRendering from './fragmentDeferredRendering.wgsl';\n\nconst kMaxNumLights = 1024;\nconst lightExtentMin = vec3.fromValues(-50, -30, -50);\nconst lightExtentMax = vec3.fromValues(50, 50, 50);\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const kVertexStride = 8;\n const vertexBuffer = device.createBuffer({\n // position: vec3, normal: vec3, uv: vec2\n size:\n mesh.positions.length * kVertexStride * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], kVertexStride * 
i);\n mapping.set(mesh.normals[i], kVertexStride * i + 3);\n mapping.set(mesh.uvs[i], kVertexStride * i + 6);\n }\n vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // GBuffer texture render targets\n const gBufferTexture2DFloat16 = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'rgba16float',\n });\n const gBufferTextureAlbedo = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'bgra8unorm',\n });\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const gBufferTextureViews = [\n gBufferTexture2DFloat16.createView(),\n gBufferTextureAlbedo.createView(),\n depthTexture.createView(),\n ];\n\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: Float32Array.BYTES_PER_ELEMENT * 6,\n format: 'float32x2',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const writeGBuffersPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: vertexWriteGBuffers,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWriteGBuffers,\n }),\n entryPoint: 'main',\n targets: [\n // normal\n { format: 'rgba16float' },\n // albedo\n { format: 'bgra8unorm' },\n ],\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n primitive,\n });\n\n const gBufferTexturesBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n ],\n });\n\n const lightsBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'read-only-storage',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const gBuffersDebugViewPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [gBufferTexturesBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: 
vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentGBuffersDebugView,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n canvasSizeWidth: canvas.width,\n canvasSizeHeight: canvas.height,\n },\n },\n primitive,\n });\n\n const deferredRenderPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n gBufferTexturesBindGroupLayout,\n lightsBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentDeferredRendering,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive,\n });\n\n const writeGBufferPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: gBufferTextureViews[0],\n\n clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n {\n view: gBufferTextureViews[1],\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const textureQuadPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const settings = {\n mode: 'rendering',\n numLights: 128,\n };\n const configUniformBuffer = (() => {\n const buffer = device.createBuffer({\n size: Uint32Array.BYTES_PER_ELEMENT,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;\n buffer.unmap();\n return buffer;\n })();\n\n gui.add(settings, 'mode', ['rendering', 'gBuffers view']);\n gui\n .add(settings, 'numLights', 1, kMaxNumLights)\n .step(1)\n .onChange(() => {\n device.queue.writeBuffer(\n configUniformBuffer,\n 0,\n new Uint32Array([settings.numLights])\n );\n });\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const cameraUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBindGroup = device.createBindGroup({\n layout: writeGBuffersPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n\n const gBufferTexturesBindGroup = device.createBindGroup({\n layout: gBufferTexturesBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: gBufferTextureViews[0],\n },\n {\n binding: 1,\n resource: gBufferTextureViews[1],\n },\n {\n binding: 2,\n resource: gBufferTextureViews[2],\n },\n ],\n });\n\n // Lights data are uploaded in a storage buffer\n // which could be updated/culled/etc. 
with a compute shader\n const extent = vec3.sub(lightExtentMax, lightExtentMin);\n const lightDataStride = 8;\n const bufferSizeInByte =\n Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;\n const lightsBuffer = device.createBuffer({\n size: bufferSizeInByte,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n\n // We randomaly populate lights randomly in a box range\n // And simply move them along y-axis per frame to show they are\n // dynamic lightings\n const lightData = new Float32Array(lightsBuffer.getMappedRange());\n const tmpVec4 = vec4.create();\n let offset = 0;\n for (let i = 0; i < kMaxNumLights; i++) {\n offset = lightDataStride * i;\n // position\n for (let i = 0; i < 3; i++) {\n tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];\n }\n tmpVec4[3] = 1;\n lightData.set(tmpVec4, offset);\n // color\n tmpVec4[0] = Math.random() * 2;\n tmpVec4[1] = Math.random() * 2;\n tmpVec4[2] = Math.random() * 2;\n // radius\n tmpVec4[3] = 20.0;\n lightData.set(tmpVec4, offset + 4);\n }\n lightsBuffer.unmap();\n\n const lightExtentBuffer = device.createBuffer({\n size: 4 * 8,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const lightExtentData = new Float32Array(8);\n lightExtentData.set(lightExtentMin, 0);\n lightExtentData.set(lightExtentMax, 4);\n device.queue.writeBuffer(\n lightExtentBuffer,\n 0,\n lightExtentData.buffer,\n lightExtentData.byteOffset,\n lightExtentData.byteLength\n );\n\n const lightUpdateComputePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: lightUpdate,\n }),\n entryPoint: 'main',\n },\n });\n const lightsBufferBindGroup = device.createBindGroup({\n layout: lightsBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n const lightsBufferComputeBindGroup = device.createBindGroup({\n layout: lightUpdateComputePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: lightExtentBuffer,\n },\n },\n ],\n });\n //--------------------\n\n // Scene matrices\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n // Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n modelData.byteLength\n );\n const invertTransposeModelMatrix = mat4.invert(modelMatrix);\n mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);\n const normalModelData = invertTransposeModelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 64,\n normalModelData.buffer,\n normalModelData.byteOffset,\n normalModelData.byteLength\n );\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const rad = Math.PI * (Date.now() / 5000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);\n\n const 
viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);\n\n return mat4.multiply(projectionMatrix, viewMatrix) as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 0,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 64,\n cameraInvViewProj.buffer,\n cameraInvViewProj.byteOffset,\n cameraInvViewProj.byteLength\n );\n\n const commandEncoder = device.createCommandEncoder();\n {\n // Write position, normal, albedo etc. data to gBuffers\n const gBufferPass = commandEncoder.beginRenderPass(\n writeGBufferPassDescriptor\n );\n gBufferPass.setPipeline(writeGBuffersPipeline);\n gBufferPass.setBindGroup(0, sceneUniformBindGroup);\n gBufferPass.setVertexBuffer(0, vertexBuffer);\n gBufferPass.setIndexBuffer(indexBuffer, 'uint16');\n gBufferPass.drawIndexed(indexCount);\n gBufferPass.end();\n }\n {\n // Update lights position\n const lightPass = commandEncoder.beginComputePass();\n lightPass.setPipeline(lightUpdateComputePipeline);\n lightPass.setBindGroup(0, lightsBufferComputeBindGroup);\n lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));\n lightPass.end();\n }\n {\n if (settings.mode === 'gBuffers view') {\n // GBuffers debug view\n // Left: depth\n // Middle: normal\n // Right: albedo (use uv to mimic a checkerboard texture)\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const debugViewPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n debugViewPass.setPipeline(gBuffersDebugViewPipeline);\n debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);\n debugViewPass.draw(6);\n debugViewPass.end();\n } else {\n // Deferred rendering\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const deferredRenderingPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n deferredRenderingPass.setPipeline(deferredRenderPipeline);\n deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);\n deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);\n deferredRenderingPass.draw(6);\n deferredRenderingPass.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst DeferredRendering: () => JSX.Element = () =>\n makeSample({\n name: 'Deferred Rendering',\n description: `This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n `,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: 
__SOURCE__,\n },\n {\n name: 'vertexWriteGBuffers.wgsl',\n contents: vertexWriteGBuffers,\n editable: true,\n },\n {\n name: 'fragmentWriteGBuffers.wgsl',\n contents: fragmentWriteGBuffers,\n editable: true,\n },\n {\n name: 'vertexTextureQuad.wgsl',\n contents: vertexTextureQuad,\n editable: true,\n },\n {\n name: 'fragmentGBuffersDebugView.wgsl',\n contents: fragmentGBuffersDebugView,\n editable: true,\n },\n {\n name: 'fragmentDeferredRendering.wgsl',\n contents: fragmentDeferredRendering,\n editable: true,\n },\n {\n name: 'lightUpdate.wgsl',\n contents: lightUpdate,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default DeferredRendering;\n"},{name:"vertexWriteGBuffers.wgsl",contents:s,editable:!0},{name:"fragmentWriteGBuffers.wgsl",contents:u,editable:!0},{name:"vertexTextureQuad.wgsl",contents:f,editable:!0},{name:"fragmentGBuffersDebugView.wgsl",contents:d,editable:!0},{name:"fragmentDeferredRendering.wgsl",contents:l,editable:!0},{name:"lightUpdate.wgsl",contents:o,editable:!0}],filename:c});var v=h},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/704.3b4b79ac2db1162c.js b/_next/static/chunks/704.3b4b79ac2db1162c.js deleted file mode 100644 index fb926088..00000000 --- a/_next/static/chunks/704.3b4b79ac2db1162c.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[704],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return c}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),s=t(7294),u=t(9147),f=t.n(u);t(7319);let l=e=>{let n=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),u=(0,s.useRef)(null),l=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376),r=new n.GUI({autoPlace:!1});return r.domElement.style.position="relative",r.domElement.style.zIndex="1000",r}},[]),d=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),g=(0,o.useRouter)(),m=g.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,h]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?x(m[1]):x(i[0].name),l&&u.current)for(u.current.appendChild(l.domElement);l.__controllers.length>0;)l.__controllers[0].remove();c&&d.current&&(c.dom.style.position="absolute",c.showPanel(1),d.current.appendChild(c.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:l,stats:c});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU 
Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:f().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:f().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:f().sourceFileContainer,"data-active":v==e.name},n))]})]})},d=e=>(0,r.jsx)(l,{...e});function c(e,n){if(!e)throw Error(n)}},6888:function(e,n,t){"use strict";t.d(n,{W:function(){return a}});var r=t(6906),i=t(9385);let a={positions:r.m,triangles:r.g,normals:[],uvs:[]};a.normals=(0,i.b)(a.positions,a.triangles),a.uvs=(0,i.q)(a.positions,"xy"),a.triangles.push([a.positions.length,a.positions.length+2,a.positions.length+1],[a.positions.length,a.positions.length+1,a.positions.length+3]),a.positions.push([-100,20,-100],[100,20,100],[-100,20,100],[100,20,-100]),a.normals.push([0,1,0],[0,1,0],[0,1,0],[0,1,0]),a.uvs.push([0,0],[1,1],[0,1],[1,0])},9385:function(e,n,t){"use strict";t.d(n,{b:function(){return i},q:function(){return o}});var r=t(6416);function i(e,n){let t=e.map(()=>[0,0,0]);return n.forEach(n=>{let[i,a,o]=n,s=e[i],u=e[a],f=e[o],l=r.R3.subtract(u,s),d=r.R3.subtract(f,s);r.R3.normalize(l,l),r.R3.normalize(d,d);let c=r.R3.cross(l,d);r.R3.add(t[i],c,t[i]),r.R3.add(t[a],c,t[a]),r.R3.add(t[o],c,t[o])}),t.forEach(e=>{r.R3.normalize(e,e)}),t}let a={xy:[0,1],xz:[0,2],yz:[1,2]};function o(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"xy",t=a[n],r=e.map(()=>[0,0]),i=[1/0,1/0],o=[-1/0,-1/0];return e.forEach((e,n)=>{r[n][0]=e[t[0]],r[n][1]=e[t[1]],i[0]=Math.min(e[t[0]],i[0]),i[1]=Math.min(e[t[1]],i[1]),o[0]=Math.max(e[t[0]],o[0]),o[1]=Math.max(e[t[1]],o[1])}),r.forEach(e=>{e[0]=(e[0]-i[0])/(o[0]-i[0]),e[1]=(e[1]-i[1])/(o[1]-i[1])}),r}},9704:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return v}});var r=t(5671),i=t(6416),a=t(6888),o="struct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(0) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\n@group(0) @binding(1) var config: Config;\n\nstruct LightExtent {\n min : vec4,\n max : vec4,\n}\n@group(0) @binding(2) var lightExtent: LightExtent;\n\n@compute @workgroup_size(64, 1, 1)\nfn main(@builtin(global_invocation_id) GlobalInvocationID : vec3) {\n var index = GlobalInvocationID.x;\n if (index >= config.numLights) {\n return;\n }\n\n lightsBuffer.lights[index].position.y = lightsBuffer.lights[index].position.y - 0.5 - 0.003 * (f32(index) - 64.0 * floor(f32(index) / 64.0));\n\n if (lightsBuffer.lights[index].position.y < lightExtent.min.y) {\n lightsBuffer.lights[index].position.y = lightExtent.max.y;\n }\n}\n",s="struct Uniforms 
{\n modelMatrix : mat4x4,\n normalModelMatrix : mat4x4,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n@group(0) @binding(1) var camera : Camera;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragNormal: vec3, // normal in world space\n @location(1) fragUV: vec2,\n}\n\n@vertex\nfn main(\n @location(0) position : vec3,\n @location(1) normal : vec3,\n @location(2) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n let worldPosition = (uniforms.modelMatrix * vec4(position, 1.0)).xyz;\n output.Position = camera.viewProjectionMatrix * vec4(worldPosition, 1.0);\n output.fragNormal = normalize((uniforms.normalModelMatrix * vec4(normal, 1.0)).xyz);\n output.fragUV = uv;\n return output;\n}\n",u="struct GBufferOutput {\n @location(0) normal : vec4,\n\n // Textures: diffuse color, specular color, smoothness, emissive etc. could go here\n @location(1) albedo : vec4,\n}\n\n@fragment\nfn main(\n @location(0) fragNormal: vec3,\n @location(1) fragUV : vec2\n) -> GBufferOutput {\n // faking some kind of checkerboard texture\n let uv = floor(30.0 * fragUV);\n let c = 0.2 + 0.5 * ((uv.x + uv.y) - 2.0 * floor((uv.x + uv.y) / 2.0));\n\n var output : GBufferOutput;\n output.normal = vec4(fragNormal, 1.0);\n output.albedo = vec4(c, c, c, 1.0);\n\n return output;\n}\n",f="@vertex\nfn main(\n @builtin(vertex_index) VertexIndex : u32\n) -> @builtin(position) vec4 {\n const pos = array(\n vec2(-1.0, -1.0), vec2(1.0, -1.0), vec2(-1.0, 1.0),\n vec2(-1.0, 1.0), vec2(1.0, -1.0), vec2(1.0, 1.0),\n );\n\n return vec4(pos[VertexIndex], 0.0, 1.0);\n}\n",l="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\noverride canvasSizeWidth: f32;\noverride canvasSizeHeight: f32;\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec4;\n let c = coord.xy / vec2(canvasSizeWidth, canvasSizeHeight);\n if (c.x < 0.33333) {\n let rawDepth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n // remap depth into something a bit more visible\n let depth = (1.0 - rawDepth) * 50.0;\n result = vec4(depth);\n } else if (c.x < 0.66667) {\n result = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n );\n result.x = (result.x + 1.0) * 0.5;\n result.y = (result.y + 1.0) * 0.5;\n result.z = (result.z + 1.0) * 0.5;\n } else {\n result = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n );\n }\n return result;\n}\n",d="\n@group(0) @binding(0) var gBufferNormal: texture_2d;\n@group(0) @binding(1) var gBufferAlbedo: texture_2d;\n@group(0) @binding(2) var gBufferDepth: texture_depth_2d;\n\nstruct LightData {\n position : vec4,\n color : vec3,\n radius : f32,\n}\nstruct LightsBuffer {\n lights: array,\n}\n@group(1) @binding(0) var lightsBuffer: LightsBuffer;\n\nstruct Config {\n numLights : u32,\n}\nstruct Camera {\n viewProjectionMatrix : mat4x4,\n invViewProjectionMatrix : mat4x4,\n}\n@group(1) @binding(1) var config: Config;\n@group(1) @binding(2) var camera: Camera;\n\nfn world_from_screen_coord(coord : vec2, depth_sample: f32) -> vec3 {\n // reconstruct world-space position from the screen coordinate.\n let posClip = vec4(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);\n let posWorldW = camera.invViewProjectionMatrix * posClip;\n let posWorld = posWorldW.xyz / posWorldW.www;\n return 
posWorld;\n}\n\n@fragment\nfn main(\n @builtin(position) coord : vec4\n) -> @location(0) vec4 {\n var result : vec3;\n\n let depth = textureLoad(\n gBufferDepth,\n vec2(floor(coord.xy)),\n 0\n );\n\n // Don't light the sky.\n if (depth >= 1.0) {\n discard;\n }\n\n let bufferSize = textureDimensions(gBufferDepth);\n let coordUV = coord.xy / vec2(bufferSize);\n let position = world_from_screen_coord(coordUV, depth);\n\n let normal = textureLoad(\n gBufferNormal,\n vec2(floor(coord.xy)),\n 0\n ).xyz;\n\n let albedo = textureLoad(\n gBufferAlbedo,\n vec2(floor(coord.xy)),\n 0\n ).rgb;\n\n for (var i = 0u; i < config.numLights; i++) {\n let L = lightsBuffer.lights[i].position.xyz - position;\n let distance = length(L);\n if (distance > lightsBuffer.lights[i].radius) {\n continue;\n }\n let lambert = max(dot(normal, normalize(L)), 0.0);\n result += vec3(\n lambert * pow(1.0 - distance / lightsBuffer.lights[i].radius, 2.0) * lightsBuffer.lights[i].color * albedo\n );\n }\n\n // some manual ambient\n result += vec3(0.2);\n\n return vec4(result, 1.0);\n}\n",c="src/sample/deferredRendering/main.ts";let g=i.R3.fromValues(-50,-30,-50),m=i.R3.fromValues(50,50,50),p=async e=>{let{canvas:n,pageState:t,gui:r}=e,c=await navigator.gpu.requestAdapter(),p=await c.requestDevice();if(!t.active)return;let h=n.getContext("webgpu"),v=window.devicePixelRatio;n.width=n.clientWidth*v,n.height=n.clientHeight*v;let x=n.width/n.height,b=navigator.gpu.getPreferredCanvasFormat();h.configure({device:p,format:b,alphaMode:"premultiplied"});let B=p.createBuffer({size:8*a.W.positions.length*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});{let P=new Float32Array(B.getMappedRange());for(let y=0;y{let e=p.createBuffer({size:Uint32Array.BYTES_PER_ELEMENT,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});return new Uint32Array(e.getMappedRange())[0]=z.numLights,e.unmap(),e})();r.add(z,"mode",["rendering","gBuffers view"]),r.add(z,"numLights",1,1024).step(1).onChange(()=>{p.queue.writeBuffer(I,0,new Uint32Array([z.numLights]))});let j=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),W=p.createBuffer({size:128,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),k=p.createBindGroup({layout:V.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:j}},{binding:1,resource:{buffer:W}}]}),q=p.createBindGroup({layout:A,entries:[{binding:0,resource:S[0]},{binding:1,resource:S[1]},{binding:2,resource:S[2]}]}),Y=i.R3.sub(m,g),H=8192*Float32Array.BYTES_PER_ELEMENT,Q=p.createBuffer({size:H,usage:GPUBufferUsage.STORAGE,mappedAtCreation:!0}),X=new Float32Array(Q.getMappedRange()),J=i.vh.create(),Z=0;for(let K=0;K<1024;K++){Z=8*K;for(let $=0;$<3;$++)J[$]=Math.random()*Y[$]+g[$];J[3]=1,X.set(J,Z),J[0]=2*Math.random(),J[1]=2*Math.random(),J[2]=2*Math.random(),J[3]=20,X.set(J,Z+4)}Q.unmap();let ee=p.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),en=new Float32Array(8);en.set(g,0),en.set(m,4),p.queue.writeBuffer(ee,0,en.buffer,en.byteOffset,en.byteLength);let 
et=p.createComputePipeline({layout:"auto",compute:{module:p.createShaderModule({code:o}),entryPoint:"main"}}),er=p.createBindGroup({layout:D,entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:W}}]}),ei=p.createBindGroup({layout:et.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:Q}},{binding:1,resource:{buffer:I}},{binding:2,resource:{buffer:ee}}]}),ea=i.R3.fromValues(0,50,-100),eo=i.R3.fromValues(0,1,0),es=i.R3.fromValues(0,0,0),eu=i._E.perspective(2*Math.PI/5,x,1,2e3),ef=i._E.inverse(i._E.lookAt(ea,es,eo)),el=i._E.multiply(eu,ef),ed=i._E.translation([0,-45,0]);p.queue.writeBuffer(j,0,ed.buffer,ed.byteOffset,ed.byteLength);let ec=i._E.invert(ed);i._E.transpose(ec,ec),p.queue.writeBuffer(j,64,ec.buffer,ec.byteOffset,ec.byteLength),requestAnimationFrame(function e(){if(!t.active)return;let n=function(){let e=i.R3.fromValues(0,50,-100),n=Math.PI*(Date.now()/5e3),t=i._E.rotateY(i._E.translation(es),n);i.R3.transformMat4(e,t,e);let r=i.R3.transformMat4(e,t),a=i._E.lookAt(r,es,eo);return i._E.multiply(eu,a,el),el}();p.queue.writeBuffer(W,0,n.buffer,n.byteOffset,n.byteLength);let r=i._E.invert(n);p.queue.writeBuffer(W,64,r.buffer,r.byteOffset,r.byteLength);let a=p.createCommandEncoder();{let o=a.beginRenderPass(F);o.setPipeline(V),o.setBindGroup(0,k),o.setVertexBuffer(0,B),o.setIndexBuffer(E,"uint16"),o.drawIndexed(w),o.end()}{let s=a.beginComputePass();s.setPipeline(et),s.setBindGroup(0,ei),s.dispatchWorkgroups(Math.ceil(16)),s.end()}if("gBuffers view"===z.mode){O.colorAttachments[0].view=h.getCurrentTexture().createView();let u=a.beginRenderPass(O);u.setPipeline(C),u.setBindGroup(0,q),u.draw(6),u.end()}else{O.colorAttachments[0].view=h.getCurrentTexture().createView();let f=a.beginRenderPass(O);f.setPipeline(N),f.setBindGroup(0,q),f.setBindGroup(1,er),f.draw(6),f.end()}p.queue.submit([a.finish()]),requestAnimationFrame(e)})},h=()=>(0,r.Tl)({name:"Deferred Rendering",description:"This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n ",gui:!0,init:p,sources:[{name:c.substring(29),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { mat4, vec3, vec4 } from 'wgpu-matrix';\nimport { mesh } from '../../meshes/stanfordDragon';\n\nimport lightUpdate from './lightUpdate.wgsl';\nimport vertexWriteGBuffers from './vertexWriteGBuffers.wgsl';\nimport fragmentWriteGBuffers from './fragmentWriteGBuffers.wgsl';\nimport vertexTextureQuad from './vertexTextureQuad.wgsl';\nimport fragmentGBuffersDebugView from './fragmentGBuffersDebugView.wgsl';\nimport fragmentDeferredRendering from './fragmentDeferredRendering.wgsl';\n\nconst kMaxNumLights = 1024;\nconst lightExtentMin = vec3.fromValues(-50, -30, -50);\nconst lightExtentMax = vec3.fromValues(50, 50, 50);\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n 
const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const aspect = canvas.width / canvas.height;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create the model vertex buffer.\n const kVertexStride = 8;\n const vertexBuffer = device.createBuffer({\n // position: vec3, normal: vec3, uv: vec2\n size:\n mesh.positions.length * kVertexStride * Float32Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Float32Array(vertexBuffer.getMappedRange());\n for (let i = 0; i < mesh.positions.length; ++i) {\n mapping.set(mesh.positions[i], kVertexStride * i);\n mapping.set(mesh.normals[i], kVertexStride * i + 3);\n mapping.set(mesh.uvs[i], kVertexStride * i + 6);\n }\n vertexBuffer.unmap();\n }\n\n // Create the model index buffer.\n const indexCount = mesh.triangles.length * 3;\n const indexBuffer = device.createBuffer({\n size: indexCount * Uint16Array.BYTES_PER_ELEMENT,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n {\n const mapping = new Uint16Array(indexBuffer.getMappedRange());\n for (let i = 0; i < mesh.triangles.length; ++i) {\n mapping.set(mesh.triangles[i], 3 * i);\n }\n indexBuffer.unmap();\n }\n\n // GBuffer texture render targets\n const gBufferTexture2DFloat16 = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'rgba16float',\n });\n const gBufferTextureAlbedo = device.createTexture({\n size: [canvas.width, canvas.height],\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n format: 'bgra8unorm',\n });\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,\n });\n\n const gBufferTextureViews = [\n gBufferTexture2DFloat16.createView(),\n gBufferTextureAlbedo.createView(),\n depthTexture.createView(),\n ];\n\n const vertexBuffers: Iterable = [\n {\n arrayStride: Float32Array.BYTES_PER_ELEMENT * 8,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: 0,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: Float32Array.BYTES_PER_ELEMENT * 3,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: Float32Array.BYTES_PER_ELEMENT * 6,\n format: 'float32x2',\n },\n ],\n },\n ];\n\n const primitive: GPUPrimitiveState = {\n topology: 'triangle-list',\n cullMode: 'back',\n };\n\n const writeGBuffersPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: vertexWriteGBuffers,\n }),\n entryPoint: 'main',\n buffers: vertexBuffers,\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentWriteGBuffers,\n }),\n entryPoint: 'main',\n targets: [\n // normal\n { format: 'rgba16float' },\n // albedo\n { format: 'bgra8unorm' },\n ],\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n primitive,\n });\n\n const gBufferTexturesBindGroupLayout = 
device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'unfilterable-float',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n texture: {\n sampleType: 'depth',\n },\n },\n ],\n });\n\n const lightsBufferBindGroupLayout = device.createBindGroupLayout({\n entries: [\n {\n binding: 0,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'read-only-storage',\n },\n },\n {\n binding: 1,\n visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,\n buffer: {\n type: 'uniform',\n },\n },\n {\n binding: 2,\n visibility: GPUShaderStage.FRAGMENT,\n buffer: {\n type: 'uniform',\n },\n },\n ],\n });\n\n const gBuffersDebugViewPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [gBufferTexturesBindGroupLayout],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentGBuffersDebugView,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n constants: {\n canvasSizeWidth: canvas.width,\n canvasSizeHeight: canvas.height,\n },\n },\n primitive,\n });\n\n const deferredRenderPipeline = device.createRenderPipeline({\n layout: device.createPipelineLayout({\n bindGroupLayouts: [\n gBufferTexturesBindGroupLayout,\n lightsBufferBindGroupLayout,\n ],\n }),\n vertex: {\n module: device.createShaderModule({\n code: vertexTextureQuad,\n }),\n entryPoint: 'main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fragmentDeferredRendering,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive,\n });\n\n const writeGBufferPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: gBufferTextureViews[0],\n\n clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n {\n view: gBufferTextureViews[1],\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const textureQuadPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n // view is acquired and set in render loop.\n view: undefined,\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const settings = {\n mode: 'rendering',\n numLights: 128,\n };\n const configUniformBuffer = (() => {\n const buffer = device.createBuffer({\n size: Uint32Array.BYTES_PER_ELEMENT,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n new Uint32Array(buffer.getMappedRange())[0] = settings.numLights;\n buffer.unmap();\n return buffer;\n })();\n\n gui.add(settings, 'mode', ['rendering', 'gBuffers view']);\n gui\n .add(settings, 'numLights', 1, kMaxNumLights)\n .step(1)\n .onChange(() => {\n device.queue.writeBuffer(\n configUniformBuffer,\n 0,\n new Uint32Array([settings.numLights])\n );\n });\n\n const modelUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const cameraUniformBuffer = device.createBuffer({\n size: 4 * 16 * 2, // two 
4x4 matrix\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sceneUniformBindGroup = device.createBindGroup({\n layout: writeGBuffersPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: modelUniformBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n\n const gBufferTexturesBindGroup = device.createBindGroup({\n layout: gBufferTexturesBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: gBufferTextureViews[0],\n },\n {\n binding: 1,\n resource: gBufferTextureViews[1],\n },\n {\n binding: 2,\n resource: gBufferTextureViews[2],\n },\n ],\n });\n\n // Lights data are uploaded in a storage buffer\n // which could be updated/culled/etc. with a compute shader\n const extent = vec3.sub(lightExtentMax, lightExtentMin);\n const lightDataStride = 8;\n const bufferSizeInByte =\n Float32Array.BYTES_PER_ELEMENT * lightDataStride * kMaxNumLights;\n const lightsBuffer = device.createBuffer({\n size: bufferSizeInByte,\n usage: GPUBufferUsage.STORAGE,\n mappedAtCreation: true,\n });\n\n // We randomaly populate lights randomly in a box range\n // And simply move them along y-axis per frame to show they are\n // dynamic lightings\n const lightData = new Float32Array(lightsBuffer.getMappedRange());\n const tmpVec4 = vec4.create();\n let offset = 0;\n for (let i = 0; i < kMaxNumLights; i++) {\n offset = lightDataStride * i;\n // position\n for (let i = 0; i < 3; i++) {\n tmpVec4[i] = Math.random() * extent[i] + lightExtentMin[i];\n }\n tmpVec4[3] = 1;\n lightData.set(tmpVec4, offset);\n // color\n tmpVec4[0] = Math.random() * 2;\n tmpVec4[1] = Math.random() * 2;\n tmpVec4[2] = Math.random() * 2;\n // radius\n tmpVec4[3] = 20.0;\n lightData.set(tmpVec4, offset + 4);\n }\n lightsBuffer.unmap();\n\n const lightExtentBuffer = device.createBuffer({\n size: 4 * 8,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const lightExtentData = new Float32Array(8);\n lightExtentData.set(lightExtentMin, 0);\n lightExtentData.set(lightExtentMax, 4);\n device.queue.writeBuffer(\n lightExtentBuffer,\n 0,\n lightExtentData.buffer,\n lightExtentData.byteOffset,\n lightExtentData.byteLength\n );\n\n const lightUpdateComputePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: lightUpdate,\n }),\n entryPoint: 'main',\n },\n });\n const lightsBufferBindGroup = device.createBindGroup({\n layout: lightsBufferBindGroupLayout,\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: cameraUniformBuffer,\n },\n },\n ],\n });\n const lightsBufferComputeBindGroup = device.createBindGroup({\n layout: lightUpdateComputePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: lightsBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: configUniformBuffer,\n },\n },\n {\n binding: 2,\n resource: {\n buffer: lightExtentBuffer,\n },\n },\n ],\n });\n //--------------------\n\n // Scene matrices\n const eyePosition = vec3.fromValues(0, 50, -100);\n const upVector = vec3.fromValues(0, 1, 0);\n const origin = vec3.fromValues(0, 0, 0);\n\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 2000.0\n );\n\n const viewMatrix = mat4.inverse(mat4.lookAt(eyePosition, origin, upVector));\n\n const viewProjMatrix = mat4.multiply(projectionMatrix, viewMatrix);\n\n 
// Move the model so it's centered.\n const modelMatrix = mat4.translation([0, -45, 0]);\n\n const modelData = modelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 0,\n modelData.buffer,\n modelData.byteOffset,\n modelData.byteLength\n );\n const invertTransposeModelMatrix = mat4.invert(modelMatrix);\n mat4.transpose(invertTransposeModelMatrix, invertTransposeModelMatrix);\n const normalModelData = invertTransposeModelMatrix as Float32Array;\n device.queue.writeBuffer(\n modelUniformBuffer,\n 64,\n normalModelData.buffer,\n normalModelData.byteOffset,\n normalModelData.byteLength\n );\n\n // Rotates the camera around the origin based on time.\n function getCameraViewProjMatrix() {\n const eyePosition = vec3.fromValues(0, 50, -100);\n\n const rad = Math.PI * (Date.now() / 5000);\n const rotation = mat4.rotateY(mat4.translation(origin), rad);\n vec3.transformMat4(eyePosition, rotation, eyePosition);\n const rotatedEyePosition = vec3.transformMat4(eyePosition, rotation);\n\n const viewMatrix = mat4.lookAt(rotatedEyePosition, origin, upVector);\n\n mat4.multiply(projectionMatrix, viewMatrix, viewProjMatrix);\n return viewProjMatrix as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const cameraViewProj = getCameraViewProjMatrix();\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 0,\n cameraViewProj.buffer,\n cameraViewProj.byteOffset,\n cameraViewProj.byteLength\n );\n const cameraInvViewProj = mat4.invert(cameraViewProj) as Float32Array;\n device.queue.writeBuffer(\n cameraUniformBuffer,\n 64,\n cameraInvViewProj.buffer,\n cameraInvViewProj.byteOffset,\n cameraInvViewProj.byteLength\n );\n\n const commandEncoder = device.createCommandEncoder();\n {\n // Write position, normal, albedo etc. 
data to gBuffers\n const gBufferPass = commandEncoder.beginRenderPass(\n writeGBufferPassDescriptor\n );\n gBufferPass.setPipeline(writeGBuffersPipeline);\n gBufferPass.setBindGroup(0, sceneUniformBindGroup);\n gBufferPass.setVertexBuffer(0, vertexBuffer);\n gBufferPass.setIndexBuffer(indexBuffer, 'uint16');\n gBufferPass.drawIndexed(indexCount);\n gBufferPass.end();\n }\n {\n // Update lights position\n const lightPass = commandEncoder.beginComputePass();\n lightPass.setPipeline(lightUpdateComputePipeline);\n lightPass.setBindGroup(0, lightsBufferComputeBindGroup);\n lightPass.dispatchWorkgroups(Math.ceil(kMaxNumLights / 64));\n lightPass.end();\n }\n {\n if (settings.mode === 'gBuffers view') {\n // GBuffers debug view\n // Left: depth\n // Middle: normal\n // Right: albedo (use uv to mimic a checkerboard texture)\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const debugViewPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n debugViewPass.setPipeline(gBuffersDebugViewPipeline);\n debugViewPass.setBindGroup(0, gBufferTexturesBindGroup);\n debugViewPass.draw(6);\n debugViewPass.end();\n } else {\n // Deferred rendering\n textureQuadPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n const deferredRenderingPass = commandEncoder.beginRenderPass(\n textureQuadPassDescriptor\n );\n deferredRenderingPass.setPipeline(deferredRenderPipeline);\n deferredRenderingPass.setBindGroup(0, gBufferTexturesBindGroup);\n deferredRenderingPass.setBindGroup(1, lightsBufferBindGroup);\n deferredRenderingPass.draw(6);\n deferredRenderingPass.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst DeferredRendering: () => JSX.Element = () =>\n makeSample({\n name: 'Deferred Rendering',\n description: `This example shows how to do deferred rendering with webgpu.\n Render geometry info to multiple targets in the gBuffers in the first pass.\n In this sample we have 2 gBuffers for normals and albedo, along with a depth texture.\n And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.\n World-space positions are reconstructed from the depth texture and camera matrix.\n We also update light position in a compute shader, where further operations like tile/cluster culling could happen.\n The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer\n in the middle, and the albedo G-buffer on the right side of the screen.\n `,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: 'vertexWriteGBuffers.wgsl',\n contents: vertexWriteGBuffers,\n editable: true,\n },\n {\n name: 'fragmentWriteGBuffers.wgsl',\n contents: fragmentWriteGBuffers,\n editable: true,\n },\n {\n name: 'vertexTextureQuad.wgsl',\n contents: vertexTextureQuad,\n editable: true,\n },\n {\n name: 'fragmentGBuffersDebugView.wgsl',\n contents: fragmentGBuffersDebugView,\n editable: true,\n },\n {\n name: 'fragmentDeferredRendering.wgsl',\n contents: fragmentDeferredRendering,\n editable: true,\n },\n {\n name: 'lightUpdate.wgsl',\n contents: lightUpdate,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default 
DeferredRendering;\n"},{name:"vertexWriteGBuffers.wgsl",contents:s,editable:!0},{name:"fragmentWriteGBuffers.wgsl",contents:u,editable:!0},{name:"vertexTextureQuad.wgsl",contents:f,editable:!0},{name:"fragmentGBuffersDebugView.wgsl",contents:l,editable:!0},{name:"fragmentDeferredRendering.wgsl",contents:d,editable:!0},{name:"lightUpdate.wgsl",contents:o,editable:!0}],filename:c});var v=h},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/webpack-2fbd1f7b2788aa97.js b/_next/static/chunks/webpack-af8ade16c1360358.js similarity index 98% rename from _next/static/chunks/webpack-2fbd1f7b2788aa97.js rename to _next/static/chunks/webpack-af8ade16c1360358.js index 64419a58..5c770a8d 100644 --- a/_next/static/chunks/webpack-2fbd1f7b2788aa97.js +++ b/_next/static/chunks/webpack-af8ade16c1360358.js @@ -1 +1 @@ -!function(){"use strict";var e,t,r,n,f,a,c,o,i,u,b={},d={};function l(e){var t=d[e];if(void 0!==t)return t.exports;var r=d[e]={exports:{}},n=!0;try{b[e].call(r.exports,r,r.exports,l),n=!1}finally{n&&delete d[e]}return r.exports}l.m=b,e=[],l.O=function(t,r,n,f){if(r){f=f||0;for(var a=e.length;a>0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var c=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(o=!1,f0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var c=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(o=!1,fWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index 904edae0..ceb5ef26 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer - WebGPU Samples

A-Buffer

See it on Github!

Demonstrates order independent transparency using a per-pixel
+ limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).
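For context on the technique this sample demonstrates: an A-buffer keeps, per pixel, a linked list of translucent fragments that a later pass sorts and blends. The sketch below shows only the insertion step, written in the embedded-WGSL style these samples use; the node layout, binding slots, and `canvasWidth` override are illustrative assumptions, not the sample's actual shader.

```ts
// Hypothetical per-pixel linked-list insertion for order-independent transparency.
const aBufferInsertWGSL = /* wgsl */ `
override canvasWidth : u32;

struct FragmentNode {
  color : vec4<f32>,
  depth : f32,
  next  : u32,  // index of the next node in this pixel's list
}

@group(0) @binding(0) var<storage, read_write> nodes : array<FragmentNode>;
@group(0) @binding(1) var<storage, read_write> heads : array<atomic<u32>>;
@group(0) @binding(2) var<storage, read_write> nodeCount : atomic<u32>;

@fragment
fn main(@builtin(position) pos : vec4<f32>) -> @location(0) vec4<f32> {
  let pixel = u32(pos.y) * canvasWidth + u32(pos.x);
  let slot = atomicAdd(&nodeCount, 1u);      // claim storage for this fragment
  if (slot < arrayLength(&nodes)) {          // capping the node pool caps memory use
    nodes[slot].color = vec4<f32>(1.0, 0.0, 0.0, 0.5);       // placeholder shading
    nodes[slot].depth = pos.z;
    nodes[slot].next = atomicExchange(&heads[pixel], slot);  // push-front
  }
  return vec4<f32>(0.0);  // color is unused; a resolve pass sorts and blends the lists
}`;
```

Bounding the `nodes` pool, as the guard above does, is one way a sample like this can limit memory usage when required, at the cost of dropping fragments on overflow.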

\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index 08c109b3..7d2b399b 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index 27da25f2..11d66f48 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each invocation of the bitonic sort shader dispatches a workgroup containing elements/2 threads. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each step of the bitonic sort shader is dispatched as a workgroup containing elements/2 invocations (threads). The GUI's Execution Information folder reports the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.
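For readers who want the arithmetic behind that description, here is a minimal compare-and-swap step of a bitonic network, in the embedded-WGSL style of these samples. This is a common textbook formulation, not the sample's actual shader: it launches one invocation per element and lets the lower index of each pair do the swap, whereas the sample packs the same work into elements/2 invocations.

```ts
// Hypothetical single bitonic step; dispatched once per (k, j) pair with
// k = 2, 4, ..., n and j = k/2, k/4, ..., 1, dispatching ceil(n / 64) workgroups.
const bitonicStepWGSL = /* wgsl */ `
struct Params { j : u32, k : u32 }

@group(0) @binding(0) var<storage, read_write> data : array<u32>;
@group(0) @binding(1) var<uniform> params : Params;

@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) gid : vec3<u32>) {
  let i = gid.x;
  let l = i ^ params.j;        // partner element for this step
  if (l > i) {                 // each pair is handled once, by its lower index
    let ascending = (i & params.k) == 0u;
    if ((data[i] > data[l]) == ascending) {
      let tmp = data[i];       // swap into the direction this block sorts in
      data[i] = data[l];
      data[l] = tmp;
    }
  }
}`;
```

Each (k, j) dispatch acts as a global barrier between steps, which is what lets a GUI pause the sort and report its state between passes.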

\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index ef02284f..14155f07 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index f495728a..dcd63d62 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index 60e05cd7..2e0ae2e7 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index 92963aef..d638efde 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index 5131e1dc..4c55f329 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu.
+ Render geometry info to multiple targets in the gBuffers in the first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity. World-space positions are reconstructed from the depth texture and camera matrix. We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen.
- "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with WebGPU.
+ Render geometry info to multiple render targets (the gBuffers) in a first pass. In this sample we have 2 gBuffers, for normals and albedo, along with a depth texture. The lighting is then computed in a second pass, reading per-fragment data back out of the gBuffers, so its cost is independent of scene complexity. World-space positions are reconstructed from the depth texture and the camera matrix. We also update the light positions in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal gBuffer in the middle, and the albedo gBuffer on the right side of the screen.
-
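The world-position reconstruction mentioned above is embedded in the minified chunk earlier in this patch. Restored to readable WGSL (the `<f32>` template arguments, stripped by minification/extraction, are re-added; `camera` is the uniform the same shader binds at @group(1) @binding(2)):

```ts
const worldFromScreenCoordWGSL = /* wgsl */ `
fn world_from_screen_coord(coord : vec2<f32>, depth_sample : f32) -> vec3<f32> {
  // Reconstruct world-space position from the screen coordinate.
  let posClip = vec4<f32>(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);
  let posWorldW = camera.invViewProjectionMatrix * posClip;
  let posWorld = posWorldW.xyz / posWorldW.www;
  return posWorld;
}`;
```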

\ No newline at end of file +

\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index ebc0d3a9..b1dbb172 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index a54e0f04..d92c276e 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index d8de0c33..22fd3113 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 3a24f02b..6e0e3c7d 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index 0bec7083..61306d7c 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 7f1114ae..704990cf 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index 8a332c8f..803975dc 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index 25a84402..7374ffa4 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 909945b7..71ee057a 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of
+ of instancing to reduce draw overhead.)"/>

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of meshes individually, as a proxy for a more complex scene, in order to demonstrate the reduction in JavaScript time spent issuing render commands. (Typically a scene like this would make use
- of instancing to reduce draw overhead.)
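For reference, the API pattern being measured is: record the draw commands once into a GPURenderBundle, then replay them each frame. A minimal TypeScript sketch with placeholder pipeline and buffers (types from @webgpu/types); the function names are illustrative, not the sample's:

```ts
// Record once. The color (and, if used, depth) formats must match the target pass;
// a pass with a depth attachment also requires depthStencilFormat here.
function recordBundle(
  device: GPUDevice,
  pipeline: GPURenderPipeline,
  vertexBuffer: GPUBuffer,
  vertexCount: number,
  format: GPUTextureFormat
): GPURenderBundle {
  const encoder = device.createRenderBundleEncoder({ colorFormats: [format] });
  encoder.setPipeline(pipeline);
  encoder.setVertexBuffer(0, vertexBuffer);
  encoder.draw(vertexCount);
  return encoder.finish();
}

// Replay every frame: one call instead of re-encoding every draw in JavaScript.
function drawFrame(pass: GPURenderPassEncoder, bundle: GPURenderBundle) {
  pass.executeBundles([bundle]);
}
```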

\ No newline at end of file + of instancing to reduce draw overhead.)

\ No newline at end of file diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html index 6eef6227..532477fa 100644 --- a/samples/resizeCanvas.html +++ b/samples/resizeCanvas.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Resize Canvas - WebGPU Samples \ No newline at end of file + Resize Canvas - WebGPU Samples \ No newline at end of file diff --git a/samples/reversedZ.html b/samples/reversedZ.html index 890dff5e..40dae9ac 100644 --- a/samples/reversedZ.html +++ b/samples/reversedZ.html @@ -17,7 +17,7 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ - "/>

Reversed Z

See it on Github!

This example shows the use of reversed z technique for better utilization of depth buffer precision.
+ The left column uses regular method, while the right one uses reversed z technique. Both are using depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other. Higher sets are placed further from camera (and are scaled for better visual purpose). Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/
- "/>

Reversed Z

See it on Github!

This example shows the use of the reversed-z technique for better utilization of depth-buffer precision. The left column uses the regular method, while the right one uses the reversed-z technique. Both use depth32float as their depth-buffer format. A set of red and green planes is positioned very close together; higher sets are placed further from the camera (and are scaled up for visibility).
 Related reading:
 https://developer.nvidia.com/content/depth-precision-visualized
 https://thxforthefish.com/posts/reverse_z/
-
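Concretely, the changed comparison function and clear value the technique calls for amount to two small differences from the regular path; a sketch, assuming the depth view already exists:

```ts
declare const depthTextureView: GPUTextureView; // assumed to be created elsewhere

// Reversed z maps the near plane to depth 1.0 and the far plane to 0.0,
// where float precision is densest, so the compare and clear flip:
const depthStencil: GPUDepthStencilState = {
  format: 'depth32float',
  depthWriteEnabled: true,
  depthCompare: 'greater', // the regular method uses 'less'
};

const depthAttachment: GPURenderPassDepthStencilAttachment = {
  view: depthTextureView,
  depthClearValue: 0.0,    // the regular method clears to 1.0
  depthLoadOp: 'clear',
  depthStoreOp: 'store',
};
```

The projection must also emit reversed depth, e.g. by swapping near/far or pre-multiplying a z to 1 - z remap matrix; which variant this sample uses is not visible in this patch.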

\ No newline at end of file +

\ No newline at end of file diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html index 39d76878..2069881b 100644 --- a/samples/rotatingCube.html +++ b/samples/rotatingCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Rotating Cube - WebGPU Samples \ No newline at end of file + Rotating Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html index 818fcf5b..0c10973e 100644 --- a/samples/samplerParameters.html +++ b/samples/samplerParameters.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file + Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).
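For reference, the parameters being visualized are the fields of GPUSamplerDescriptor; the values below are arbitrary examples, not the sample's defaults:

```ts
declare const device: GPUDevice; // assumed, as in the samples' init()

const sampler = device.createSampler({
  addressModeU: 'repeat',        // wrapping outside [0, 1] along U
  addressModeV: 'clamp-to-edge', // ...and along V
  magFilter: 'linear',           // filtering when the texture is magnified
  minFilter: 'linear',           // ...and when it is minified
  mipmapFilter: 'linear',        // blending between mip levels
  lodMinClamp: 0,
  lodMaxClamp: 3,                // mip levels 0..3 are the 16x16 down to 2x2 images
  maxAnisotropy: 16,             // requires all three filters to be 'linear'
});
```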

\ No newline at end of file diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html index 63b664bd..f814bbda 100644 --- a/samples/shadowMapping.html +++ b/samples/shadowMapping.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Shadow Mapping - WebGPU Samples \ No newline at end of file + Shadow Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/texturedCube.html b/samples/texturedCube.html index 514cc9b9..7eadffa5 100644 --- a/samples/texturedCube.html +++ b/samples/texturedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Textured Cube - WebGPU Samples \ No newline at end of file + Textured Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/twoCubes.html b/samples/twoCubes.html index e09ad9dc..e5b2a487 100644 --- a/samples/twoCubes.html +++ b/samples/twoCubes.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Two Cubes - WebGPU Samples \ No newline at end of file + Two Cubes - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploading.html b/samples/videoUploading.html index 4359ce1b..40dff0f7 100644 --- a/samples/videoUploading.html +++ b/samples/videoUploading.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading - WebGPU Samples \ No newline at end of file + Video Uploading - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html index a48bdce1..8665f1f8 100644 --- a/samples/videoUploadingWebCodecs.html +++ b/samples/videoUploadingWebCodecs.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file + Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file diff --git a/samples/worker.html b/samples/worker.html index ee6d0479..cd518ab6 100644 --- a/samples/worker.html +++ b/samples/worker.html @@ -10,6 +10,6 @@ } WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on Github!

This example shows one method of using WebGPU in a web worker and presenting to
+ which is then transferred to the worker where all the WebGPU calls are made."/>

\ No newline at end of file + which is then transferred to the worker where all the WebGPU calls are made.
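The description above is truncated in this hunk; the part that survives ("...which is then transferred to the worker where all the WebGPU calls are made") refers to handing an OffscreenCanvas to the worker. A minimal sketch of that pattern using the standard OffscreenCanvas API (file names are assumptions):

```ts
// main.ts: transfer control of the canvas; all WebGPU calls then happen in the worker.
const canvas = document.querySelector('canvas') as HTMLCanvasElement;
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('./worker.js', { type: 'module' });
worker.postMessage({ canvas: offscreen }, [offscreen]); // transferred, not copied

// worker.ts: receive the canvas and configure WebGPU against it.
self.onmessage = async (ev: MessageEvent<{ canvas: OffscreenCanvas }>) => {
  const adapter = await navigator.gpu.requestAdapter();
  const device = await adapter!.requestDevice();
  const context = ev.data.canvas.getContext('webgpu') as GPUCanvasContext;
  context.configure({ device, format: navigator.gpu.getPreferredCanvasFormat() });
  // ...create pipelines and render exactly as in the non-worker samples.
};
```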

\ No newline at end of file