From 648e49b465eebf47dbf2b67182a5832b604516b2 Mon Sep 17 00:00:00 2001
From: greggman
Date: Mon, 30 Oct 2023 23:22:43 +0000
Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20=20@=203ff5c?=
 =?UTF-8?q?fa0d9c1d9c56637be1f93842a969e4d6a65=20=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 404.html | 4 ++--
 .../samples/A-buffer.json | 0
 .../samples/animometer.json | 0
 .../samples/bitonicSort.json | 0
 .../samples/cameras.json | 0
 .../samples/computeBoids.json | 0
 .../samples/cornell.json | 0
 .../samples/cubemap.json | 0
 .../samples/deferredRendering.json | 0
 .../samples/fractalCube.json | 0
 .../samples/gameOfLife.json | 0
 .../samples/helloTriangle.json | 0
 .../samples/helloTriangleMSAA.json | 0
 .../samples/imageBlur.json | 0
 .../samples/instancedCube.json | 0
 .../samples/normalMap.json | 0
 .../samples/particles.json | 0
 .../samples/renderBundles.json | 0
 .../samples/resizeCanvas.json | 0
 .../samples/reversedZ.json | 0
 .../samples/rotatingCube.json | 0
 .../samples/samplerParameters.json | 0
 .../samples/shadowMapping.json | 0
 .../samples/texturedCube.json | 0
 .../samples/twoCubes.json | 0
 .../samples/videoUploading.json | 0
 .../samples/videoUploadingWebCodecs.json | 0
 .../samples/worker.json | 0
 _next/static/chunks/118.83881cc2a26093f7.js | 1 -
 _next/static/chunks/118.be674f7ab6c92e48.js | 1 +
 _next/static/chunks/167.0cb2923e32255961.js | 1 -
 _next/static/chunks/167.ad599134368bd94d.js | 1 +
 _next/static/chunks/31.48d6f46eea121502.js | 1 +
 _next/static/chunks/31.9edde3db065401fc.js | 1 -
 _next/static/chunks/428.0485846e4c9c50a8.js | 1 -
 _next/static/chunks/428.5f30ed82e9153690.js | 1 +
 _next/static/chunks/432.0c104140e4e6aed7.js | 1 -
 _next/static/chunks/432.b708dc8c2555b6f7.js | 1 +
 _next/static/chunks/613.a0b21871f0f1166d.js | 1 +
 _next/static/chunks/613.fefb0c175c2d45b2.js | 1 -
 _next/static/chunks/677.341d6775960cfe08.js | 1 +
 _next/static/chunks/677.956018e927779b1e.js | 1 -
 _next/static/chunks/770.3da42912bb9098b5.js | 1 -
 _next/static/chunks/770.7ae9d850819591f8.js | 1 +
 .../{878.ac7b18c5949410b1.js => 878.616dc3f7dab79bc0.js} | 2 +-
 ...ebpack-dee32cb39c7d6840.js => webpack-b64fa792331afcf2.js} | 2 +-
 .../_buildManifest.js | 0
 .../_ssgManifest.js | 0
 index.html | 2 +-
 samples/A-buffer.html | 4 ++--
 samples/animometer.html | 2 +-
 samples/bitonicSort.html | 2 +-
 samples/cameras.html | 2 +-
 samples/computeBoids.html | 2 +-
 samples/cornell.html | 2 +-
 samples/cubemap.html | 2 +-
 samples/deferredRendering.html | 4 ++--
 samples/fractalCube.html | 2 +-
 samples/gameOfLife.html | 2 +-
 samples/helloTriangle.html | 2 +-
 samples/helloTriangleMSAA.html | 2 +-
 samples/imageBlur.html | 2 +-
 samples/instancedCube.html | 2 +-
 samples/normalMap.html | 2 +-
 samples/particles.html | 2 +-
 samples/renderBundles.html | 4 ++--
 samples/resizeCanvas.html | 2 +-
 samples/reversedZ.html | 4 ++--
 samples/rotatingCube.html | 2 +-
 samples/samplerParameters.html | 2 +-
 samples/shadowMapping.html | 2 +-
 samples/texturedCube.html | 2 +-
 samples/twoCubes.html | 2 +-
 samples/videoUploading.html | 2 +-
 samples/videoUploadingWebCodecs.html | 2 +-
 samples/worker.html | 4 ++--
 76 files changed, 45 insertions(+), 45 deletions(-)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/A-buffer.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/animometer.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/bitonicSort.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/cameras.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/computeBoids.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/cornell.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/cubemap.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/deferredRendering.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/fractalCube.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/gameOfLife.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/helloTriangle.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/helloTriangleMSAA.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/imageBlur.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/instancedCube.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/normalMap.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/particles.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/renderBundles.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/resizeCanvas.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/reversedZ.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/rotatingCube.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/samplerParameters.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/shadowMapping.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/texturedCube.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/twoCubes.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/videoUploading.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/videoUploadingWebCodecs.json (100%)
 rename _next/data/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/samples/worker.json (100%)
 delete mode 100644 _next/static/chunks/118.83881cc2a26093f7.js
 create mode 100644 _next/static/chunks/118.be674f7ab6c92e48.js
 delete mode 100644 _next/static/chunks/167.0cb2923e32255961.js
 create mode 100644 _next/static/chunks/167.ad599134368bd94d.js
 create mode 100644 _next/static/chunks/31.48d6f46eea121502.js
 delete mode 100644 _next/static/chunks/31.9edde3db065401fc.js
 delete mode 100644 _next/static/chunks/428.0485846e4c9c50a8.js
 create mode 100644 _next/static/chunks/428.5f30ed82e9153690.js
 delete mode 100644 _next/static/chunks/432.0c104140e4e6aed7.js
 create mode 100644 _next/static/chunks/432.b708dc8c2555b6f7.js
 create mode 100644 _next/static/chunks/613.a0b21871f0f1166d.js
 delete mode 100644 _next/static/chunks/613.fefb0c175c2d45b2.js
 create mode 100644 _next/static/chunks/677.341d6775960cfe08.js
 delete mode 100644 _next/static/chunks/677.956018e927779b1e.js
 delete mode 100644 _next/static/chunks/770.3da42912bb9098b5.js
 create mode 100644 _next/static/chunks/770.7ae9d850819591f8.js
 rename _next/static/chunks/{878.ac7b18c5949410b1.js => 878.616dc3f7dab79bc0.js} (69%)
 rename _next/static/chunks/{webpack-dee32cb39c7d6840.js => webpack-b64fa792331afcf2.js} (52%)
 rename _next/static/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/_buildManifest.js (100%)
 rename _next/static/{rzC4zdookA2uZQPm0xV1I => zFybhN_sEJJ4DmMVojx9G}/_ssgManifest.js (100%)

diff --git a/404.html b/404.html
index cccb8e56..b430cf8b 100644
--- a/404.html
+++ b/404.html
@@ -1,4 +1,4 @@
-404: This page could not be found [remainder of minified single-line HTML elided]
\ No newline at end of file
+404: This page could not be found [remainder of minified single-line HTML elided]
\ No newline at end of file diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/A-buffer.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/A-buffer.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/A-buffer.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/A-buffer.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/animometer.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/animometer.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/animometer.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/animometer.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/bitonicSort.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/bitonicSort.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/bitonicSort.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/bitonicSort.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/cameras.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/cameras.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/cameras.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/cameras.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/computeBoids.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/computeBoids.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/computeBoids.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/computeBoids.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/cornell.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/cornell.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/cornell.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/cornell.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/cubemap.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/cubemap.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/cubemap.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/cubemap.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/deferredRendering.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/deferredRendering.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/deferredRendering.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/deferredRendering.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/fractalCube.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/fractalCube.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/fractalCube.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/fractalCube.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/gameOfLife.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/gameOfLife.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/gameOfLife.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/gameOfLife.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/helloTriangle.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/helloTriangle.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/helloTriangle.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/helloTriangle.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/helloTriangleMSAA.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/helloTriangleMSAA.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/helloTriangleMSAA.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/helloTriangleMSAA.json diff --git 
a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/imageBlur.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/imageBlur.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/imageBlur.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/imageBlur.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/instancedCube.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/instancedCube.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/instancedCube.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/instancedCube.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/normalMap.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/normalMap.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/normalMap.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/normalMap.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/particles.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/particles.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/particles.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/particles.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/renderBundles.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/renderBundles.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/renderBundles.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/renderBundles.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/resizeCanvas.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/resizeCanvas.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/resizeCanvas.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/resizeCanvas.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/reversedZ.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/reversedZ.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/reversedZ.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/reversedZ.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/rotatingCube.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/rotatingCube.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/rotatingCube.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/rotatingCube.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/samplerParameters.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/samplerParameters.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/samplerParameters.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/samplerParameters.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/shadowMapping.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/shadowMapping.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/shadowMapping.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/shadowMapping.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/texturedCube.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/texturedCube.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/texturedCube.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/texturedCube.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/twoCubes.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/twoCubes.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/twoCubes.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/twoCubes.json diff --git 
a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/videoUploading.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/videoUploading.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/videoUploading.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/videoUploading.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/videoUploadingWebCodecs.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/videoUploadingWebCodecs.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/videoUploadingWebCodecs.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/videoUploadingWebCodecs.json diff --git a/_next/data/rzC4zdookA2uZQPm0xV1I/samples/worker.json b/_next/data/zFybhN_sEJJ4DmMVojx9G/samples/worker.json similarity index 100% rename from _next/data/rzC4zdookA2uZQPm0xV1I/samples/worker.json rename to _next/data/zFybhN_sEJJ4DmMVojx9G/samples/worker.json diff --git a/_next/static/chunks/118.83881cc2a26093f7.js b/_next/static/chunks/118.83881cc2a26093f7.js deleted file mode 100644 index f153808e..00000000 --- a/_next/static/chunks/118.83881cc2a26093f7.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[118],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),h=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?v(h[1]):v(a[0].name),c&&l.current)for(l.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function p(e,n){if(!e)throw Error(n)}},7118:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return S}});var r,a,i=t(6416),o=t(5671),s="struct SpaceTransformUniforms {\n projMatrix: mat4x4f,\n viewMatrix: mat4x4f,\n modelMatrix: mat4x4f,\n}\n\nstruct Uniforms_MapInfo {\n mappingType: u32,\n lightPosX: f32,\n lightPosY: f32,\n lightPosZ: f32,\n lightIntensity: f32,\n depthScale: f32,\n depthLayers: f32,\n}\n\nstruct VertexInput {\n // Shader assumes the missing 4th float is 1.0\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f,\n @location(3) vert_tan: vec3f,\n @location(4) vert_bitan: vec3f,\n}\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n // Vertex position in world space\n @location(2) posWS: vec3f,\n // Vertex position in tangent space\n @location(3) posTS: vec3f,\n // View position in tangent space\n @location(4) viewTS: vec3f,\n // Extracted components of our tbn matrix\n @location(5) tbnTS0: vec3, \n @location(6) tbnTS1: vec3,\n @location(7) tbnTS2: vec3,\n}\n\n// Uniforms\n@group(0) @binding(0) var spaceTransform : SpaceTransformUniforms;\n@group(0) @binding(1) var mapInfo: Uniforms_MapInfo;\n\n// Texture info\n@group(1) @binding(0) var textureSampler: sampler;\n@group(1) @binding(1) var diffuseTexture: texture_2d;\n@group(1) @binding(2) var normalTexture: texture_2d;\n@group(1) @binding(3) var depthTexture: texture_2d;\n\nfn parallax_uv(\n uv: vec2f, \n viewDirTS: vec3f, \n depthSample: f32,\n depthScale: f32,\n) -> vec2f {\n if (mapInfo.mappingType == 4) {\n // Perturb uv coordinates based on depth and camera direction\n let p = viewDirTS.xy * (depthSample * depthScale) / viewDirTS.z;\n return uv - p;\n }\n // Break up depth space into layers\n let depthPerLayer = 1.0 / f32(mapInfo.depthLayers);\n // Start at lowest depth\n var currentDepth = 0.0;\n let delta_uv = viewDirTS.xy * depthScale / (viewDirTS.z * mapInfo.depthLayers);\n var prev_uv = uv;\n var cur_uv = uv;\n\n var depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n var prevDepthFromTexture = depthFromTexture;\n var prevCurrentDepth = currentDepth;\n for (var i: u32 = 0; i < 32; i++) {\n currentDepth += depthPerLayer;\n prev_uv = cur_uv;\n cur_uv -= delta_uv;\n depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n // Determine whether current depth is greater than depth map\n // Once we reach a certain threshold, we stop updating cur_uv\n cur_uv = select(cur_uv, prev_uv, depthFromTexture < currentDepth);\n prevDepthFromTexture = select(depthFromTexture, prevDepthFromTexture, prevDepthFromTexture < currentDepth);\n prevCurrentDepth = select(currentDepth, prevCurrentDepth, prevDepthFromTexture < currentDepth);\n }\n return cur_uv;\n}\n\nfn when_greater(v1: f32, v2: f32) 
-> f32 {\n return max(sign(v1 - v2), 0.0);\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n // Create the Model to View Matrix\n let MV = spaceTransform.viewMatrix * spaceTransform.modelMatrix;\n // Create the Model to View to Projection Matrix\n let MVP = spaceTransform.projMatrix * MV;\n \n // Get Clip space transforms and pass through values out of the way\n output.Position = MVP * input.position;\n output.uv = input.uv;\n output.normal = input.normal;\n\n // Multiply pos by modelMatrix to get the vertex/fragment's position in world space\n output.posWS = vec3f((spaceTransform.modelMatrix * input.position).xyz);\n \n var MV3x3 = mat3x3f(\n MV[0].xyz,\n MV[1].xyz,\n MV[2].xyz\n );\n\n // Get unit vectors of normal, tangent, and bitangents in model space\n let vertexTangent = normalize(input.vert_tan);\n let vertexBitangent = normalize(input.vert_bitan);\n let vertexNormal = normalize(input.normal);\n\n // Convert tbn unit vectors to mv space for a model view tbn\n var tbnTS = transpose(\n MV3x3 * mat3x3f(\n vertexTangent,\n vertexBitangent,\n vertexNormal\n )\n );\n // Condense to vec3s so they can be passed to fragment shader\n output.tbnTS0 = tbnTS[0];\n output.tbnTS1 = tbnTS[1];\n output.tbnTS2 = tbnTS[2];\n\n // Get the tangent space position of the vertex\n output.posTS = tbnTS * (MV * input.position).xyz;\n // Get the tangent space position of the camera view\n output.viewTS = tbnTS * vec3f(0.0, 0.0, 0.0);\n\n return output;\n}\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n // Reconstruct tbnTS\n let tbnTS = mat3x3f(\n input.tbnTS0,\n input.tbnTS1,\n input.tbnTS2,\n );\n\n // Get direction of view in tangent space\n let viewDirTS = normalize(input.viewTS - input.posTS);\n\n // Get position, direction, and distance of light in tangent space (no need to multiply by model matrix as there is no model)\n let lightPosVS = spaceTransform.viewMatrix * vec4f(mapInfo.lightPosX, mapInfo.lightPosY, mapInfo.lightPosZ, 1.0);\n let lightPosTS = tbnTS * lightPosVS.xyz;\n let lightDirTS = normalize(lightPosTS - input.posTS);\n let lightDistanceTS = distance(input.posTS, lightPosTS);\n\n let depthMap = textureSample(depthTexture, textureSampler, input.uv); \n\n let uv = select(\n parallax_uv(input.uv, viewDirTS, depthMap.r, mapInfo.depthScale),\n input.uv,\n mapInfo.mappingType < 4\n );\n\n // Get values from textures\n let diffuseMap = textureSample(diffuseTexture, textureSampler, uv);\n let normalMap = textureSample(normalTexture, textureSampler, uv);\n\n // Get normal in tangent space\n let normalTS = normalize((normalMap.xyz * 2.0) - 1.0);\n \n // Calculate diffusion lighting\n let lightColorIntensity = vec3f(255.0, 255.0, 255.0) * mapInfo.lightIntensity;\n //How similar is the normal to the lightDirection\n let diffuseStrength = clamp(\n dot(normalTS, lightDirTS), 0.0, 1.0\n );\n // Strenght inversely proportional to square of distance from light\n let diffuseLight = (lightColorIntensity * diffuseStrength) / (lightDistanceTS * lightDistanceTS);\n\n switch (mapInfo.mappingType) {\n // Output the diffuse texture\n case 0: {\n return vec4f(diffuseMap.rgb, 1.0);\n }\n // Output the normal map\n case 1: {\n return vec4f(normalMap.rgb, 1.0);\n }\n // Output the height map\n case 2: {\n return vec4f(depthMap.rgb, 1.0);\n }\n default: {\n return vec4f(diffuseMap.rgb * diffuseLight, 1.0);\n }\n }\n}";let l=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=arguments.length>3&&void 
0!==arguments[3]&&arguments[3],a=t?GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE:GPUBufferUsage.VERTEX,i=r?GPUBufferUsage.INDEX|GPUBufferUsage.STORAGE:GPUBufferUsage.INDEX,o=e.createBuffer({size:n.vertices.byteLength,usage:a,mappedAtCreation:!0});new Float32Array(o.getMappedRange()).set(n.vertices),o.unmap();let s=e.createBuffer({size:n.indices.byteLength,usage:i,mappedAtCreation:!0});return n.indices.byteLength===n.indices.length*Uint16Array.BYTES_PER_ELEMENT?new Uint16Array(s.getMappedRange()).set(n.indices):new Uint32Array(s.getMappedRange()).set(n.indices),s.unmap(),{vertexBuffer:o,indexBuffer:s,indexCount:n.indices.length}},u=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+0,3);return i.R3.fromValues(t[0],t[1],t[2])},c=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+6*Float32Array.BYTES_PER_ELEMENT,2);return i.K4.fromValues(t[0],t[1])},d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1;r=Math.floor(r),a=Math.floor(a),o=Math.floor(o);let s=[],l=[],u=0,c=(e,n,t,r,a,o,c,d,p,m)=>{let h=o/p,g=c/m,f=o/2,x=c/2,v=d/2,b=p+1,y=m+1,T=0,S=i.R3.create(),w=i.R3.create();for(let P=0;P0?1:-1,l.push(...w),l.push(G/p),l.push(1-P/m),T+=1}}for(let M=0;M0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,o=arguments.length>6&&void 0!==arguments[6]?arguments[6]:"uint16",{vertices:s,indices:l}=d(e,n,t,r,a,i),u=8*Float32Array.BYTES_PER_ELEMENT,c="uint16"===o?new Uint16Array(l):new Uint32Array(l);return{vertices:new Float32Array(s),indices:c,vertexStride:u}},m=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,s=p(e,n,t,r,a,o),l=s.vertexStride/Float32Array.BYTES_PER_ELEMENT,d=s.vertices.length/l,m=Array(d),h=Array(d),g=Array(d);for(let f=0;f{let s=[];for(let l=0;l{let n=e.split("x"),t=parseInt(n[0].replace(/[^0-9]/g,""))/8,r=t*(void 0!==n[1]?parseInt(n[1]):1);return r},f=e=>{let n=e.reduce((e,n,t)=>{let r={shaderLocation:t,offset:e.arrayStride,format:n},a=e.arrayStride+g(n),i={attributes:[...e.attributes,r],arrayStride:a};return i},{attributes:[],arrayStride:0}),t={arrayStride:n.arrayStride,attributes:n.attributes};return t},x=function(e,n,t,r,a,i,o){let s=arguments.length>7&&void 0!==arguments[7]&&arguments[7],l=arguments.length>8&&void 0!==arguments[8]?arguments[8]:"triangle-list",u=arguments.length>9&&void 
0!==arguments[9]?arguments[9]:"back",c={label:"".concat(n,".pipeline"),layout:e.createPipelineLayout({label:"".concat(n,".pipelineLayout"),bindGroupLayouts:t}),vertex:{module:e.createShaderModule({label:"".concat(n,".vertexShader"),code:r}),entryPoint:"vertexMain",buffers:0!==a.length?[f(a)]:[]},fragment:{module:e.createShaderModule({label:"".concat(n,".fragmentShader"),code:i}),entryPoint:"fragmentMain",targets:[{format:o}]},primitive:{topology:l,cullMode:u}};return s&&(c.depthStencil={depthCompare:"less",depthWriteEnabled:!0,format:"depth24plus"}),e.createRenderPipeline(c)},v=(e,n)=>{let t=e.createTexture({size:[n.width,n.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});return e.queue.copyExternalImageToTexture({source:n},{texture:t},[n.width,n.height]),t};var b="src/sample/normalMap/main.ts";(r=a||(a={}))[r.Spiral=0]="Spiral",r[r.Toybox=1]="Toybox",r[r.BrickWall=2]="BrickWall";let y=async e=>{let n,t,r,o,u,c,d,p,{canvas:g,pageState:f,gui:b}=e,y=await navigator.gpu.requestAdapter(),T=await y.requestDevice();if(!f.active)return;let S=g.getContext("webgpu"),w=window.devicePixelRatio;g.width=g.clientWidth*w,g.height=g.clientHeight*w;let P=navigator.gpu.getPreferredCanvasFormat();S.configure({device:T,format:P,alphaMode:"premultiplied"});let B={"Bump Mode":"Normal Map",cameraPosX:0,cameraPosY:.8,cameraPosZ:-1.4,lightPosX:1.7,lightPosY:.7,lightPosZ:-1.9,lightIntensity:.02,depthScale:.05,depthLayers:16,Texture:"Spiral","Reset Light"(){}},G=T.createTexture({size:[g.width,g.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),U=T.createBuffer({size:256,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),M=T.createBuffer({size:7*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let E=await fetch("/assets/img/wood_diffuse.png"),V=await createImageBitmap(await E.blob());n=v(T,V)}{let A=await fetch("/assets/img/spiral_normal.png"),F=await createImageBitmap(await A.blob());t=v(T,F)}{let _=await fetch("/assets/img/spiral_height.png"),D=await createImageBitmap(await _.blob());r=v(T,D)}{let R=await fetch("/assets/img/toybox_normal.png"),I=await createImageBitmap(await R.blob());o=v(T,I)}{let C=await fetch("/assets/img/toybox_height.png"),L=await createImageBitmap(await C.blob());u=v(T,L)}{let N=await fetch("/assets/img/brickwall_diffuse.png"),j=await createImageBitmap(await N.blob());c=v(T,j)}{let Y=await fetch("/assets/img/brickwall_normal.png"),O=await createImageBitmap(await Y.blob());d=v(T,O)}{let X=await fetch("/assets/img/brickwall_height.png"),z=await createImageBitmap(await X.blob());p=v(T,z)}let k=T.createSampler({magFilter:"linear",minFilter:"linear"}),H={colorAttachments:[{view:void 
0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:G.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},q=l(T,m(1,1,1)),W=h([0,1],[GPUShaderStage.VERTEX|GPUShaderStage.FRAGMENT,GPUShaderStage.FRAGMENT|GPUShaderStage.VERTEX],["buffer","buffer"],[{type:"uniform"},{type:"uniform"}],[[{buffer:U},{buffer:M}]],"Frame",T),Z=h([0,1,2,3],[GPUShaderStage.FRAGMENT],["sampler","texture","texture","texture"],[{type:"filtering"},{sampleType:"float"},{sampleType:"float"},{sampleType:"float"}],[[k,n.createView(),t.createView(),r.createView()],[k,n.createView(),o.createView(),u.createView()],[k,c.createView(),d.createView(),p.createView()]],"Surface",T),$=g.width/g.height,K=i._E.perspective(2*Math.PI/5,$,.1,10),J=()=>{switch(B["Bump Mode"]){case"Diffuse Texture":return 0;case"Normal Texture":return 1;case"Depth Texture":return 2;case"Normal Map":return 3;case"Parallax Scale":return 4;case"Steep Parallax":return 5}},Q=x(T,"NormalMappingRender",[W.bindGroupLayout,Z.bindGroupLayout],s,["float32x3","float32x3","float32x2","float32x3","float32x3"],s,P,!0),ee=0,en=()=>{ee=a[B.Texture]};b.add(B,"Bump Mode",["Diffuse Texture","Normal Texture","Depth Texture","Normal Map","Parallax Scale","Steep Parallax"]),b.add(B,"Texture",["Spiral","Toybox","BrickWall"]).onChange(en);let et=b.addFolder("Light"),er=b.addFolder("Depth");et.add(B,"Reset Light").onChange(()=>{ea.setValue(1.7),ei.setValue(.7),eo.setValue(-1.9),es.setValue(.02)});let ea=et.add(B,"lightPosX",-5,5).step(.1),ei=et.add(B,"lightPosY",-5,5).step(.1),eo=et.add(B,"lightPosZ",-5,5).step(.1),es=et.add(B,"lightIntensity",0,.1).step(.002);er.add(B,"depthScale",0,.1).step(.01),er.add(B,"depthLayers",1,32).step(1),requestAnimationFrame(function e(){if(!f.active)return;let n=i._E.lookAt([B.cameraPosX,B.cameraPosY,B.cameraPosZ],[0,0,0],[0,1,0]),t=function(){let e=i._E.create();i._E.identity(e);let n=Date.now()/1e3;return i._E.rotateY(e,-.5*n,e),e}(),r=new Float32Array([...K,...n,...t]),a=J();T.queue.writeBuffer(U,0,r.buffer,r.byteOffset,r.byteLength),T.queue.writeBuffer(M,0,new Uint32Array([a])),T.queue.writeBuffer(M,4,new Float32Array([B.lightPosX,B.lightPosY,B.lightPosZ,B.lightIntensity,B.depthScale,B.depthLayers])),H.colorAttachments[0].view=S.getCurrentTexture().createView();let o=T.createCommandEncoder(),s=o.beginRenderPass(H);s.setPipeline(Q),s.setBindGroup(0,W.bindGroups[0]),s.setBindGroup(1,Z.bindGroups[ee]),s.setVertexBuffer(0,q.vertexBuffer),s.setIndexBuffer(q.indexBuffer,"uint16"),s.drawIndexed(q.indexCount),s.end(),T.queue.submit([o.finish()]),requestAnimationFrame(e)})},T=()=>(0,o.Tl)({name:"Normal Mapping",description:"This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. 
Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.",gui:!0,init:y,sources:[{name:b.substring(21),contents:"import { mat4 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport normalMapWGSL from './normalMap.wgsl';\nimport { createMeshRenderable } from '../../meshes/mesh';\nimport { createBoxMeshWithTangents } from '../../meshes/box';\nimport {\n createBindGroupDescriptor,\n create3DRenderPipeline,\n createTextureFromImage,\n} from './utils';\n\nconst MAT4X4_BYTES = 64;\nenum TextureAtlas {\n Spiral,\n Toybox,\n BrickWall,\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n interface GUISettings {\n 'Bump Mode':\n | 'Diffuse Texture'\n | 'Normal Texture'\n | 'Depth Texture'\n | 'Normal Map'\n | 'Parallax Scale'\n | 'Steep Parallax';\n cameraPosX: number;\n cameraPosY: number;\n cameraPosZ: number;\n lightPosX: number;\n lightPosY: number;\n lightPosZ: number;\n lightIntensity: number;\n depthScale: number;\n depthLayers: number;\n Texture: string;\n 'Reset Light': () => void;\n }\n\n const settings: GUISettings = {\n 'Bump Mode': 'Normal Map',\n cameraPosX: 0.0,\n cameraPosY: 0.8,\n cameraPosZ: -1.4,\n lightPosX: 1.7,\n lightPosY: 0.7,\n lightPosZ: -1.9,\n lightIntensity: 0.02,\n depthScale: 0.05,\n depthLayers: 16,\n Texture: 'Spiral',\n 'Reset Light': () => {\n return;\n },\n };\n\n // Create normal mapping resources and pipeline\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBuffer = device.createBuffer({\n // Buffer holding projection, view, and model matrices plus padding bytes\n size: MAT4X4_BYTES * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const mapMethodBuffer = device.createBuffer({\n // Buffer holding mapping type, light uniforms, and depth uniforms\n size: Float32Array.BYTES_PER_ELEMENT * 7,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let woodDiffuseTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/wood_diffuse.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n woodDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralNormalTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/spiral_normal.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralHeightTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/spiral_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxNormalTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/toybox_normal.png');\n const imageBitmap = await 
createImageBitmap(await response.blob());\n toyboxNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxHeightTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/toybox_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n toyboxHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallDiffuseTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/brickwall_diffuse.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallNormalTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/brickwall_normal.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallHeightTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/brickwall_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const box = createMeshRenderable(\n device,\n createBoxMeshWithTangents(1.0, 1.0, 1.0)\n );\n\n // Uniform bindGroups and bindGroupLayout\n const frameBGDescriptor = createBindGroupDescriptor(\n [0, 1],\n [\n GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,\n ],\n ['buffer', 'buffer'],\n [{ type: 'uniform' }, { type: 'uniform' }],\n [[{ buffer: uniformBuffer }, { buffer: mapMethodBuffer }]],\n 'Frame',\n device\n );\n\n // Texture bindGroups and bindGroupLayout\n const surfaceBGDescriptor = createBindGroupDescriptor(\n [0, 1, 2, 3],\n [GPUShaderStage.FRAGMENT],\n ['sampler', 'texture', 'texture', 'texture'],\n [\n { type: 'filtering' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n ],\n // Multiple bindgroups that accord to the layout defined above\n [\n [\n sampler,\n woodDiffuseTexture.createView(),\n spiralNormalTexture.createView(),\n spiralHeightTexture.createView(),\n ],\n [\n sampler,\n woodDiffuseTexture.createView(),\n toyboxNormalTexture.createView(),\n toyboxHeightTexture.createView(),\n ],\n [\n sampler,\n brickwallDiffuseTexture.createView(),\n brickwallNormalTexture.createView(),\n brickwallHeightTexture.createView(),\n ],\n ],\n 'Surface',\n device\n );\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 0.1,\n 10.0\n ) as Float32Array;\n\n function getViewMatrix() {\n return mat4.lookAt(\n [settings.cameraPosX, settings.cameraPosY, settings.cameraPosZ],\n [0, 0, 0],\n [0, 1, 0]\n );\n }\n\n function getModelMatrix() {\n const modelMatrix = mat4.create();\n mat4.identity(modelMatrix);\n const now = Date.now() / 1000;\n mat4.rotateY(modelMatrix, now * -0.5, modelMatrix);\n return modelMatrix;\n }\n\n // Change the model mapping type\n const 
getMappingType = (): number => {\n switch (settings['Bump Mode']) {\n case 'Diffuse Texture':\n return 0;\n case 'Normal Texture':\n return 1;\n case 'Depth Texture':\n return 2;\n case 'Normal Map':\n return 3;\n case 'Parallax Scale':\n return 4;\n case 'Steep Parallax':\n return 5;\n }\n };\n\n const texturedCubePipeline = create3DRenderPipeline(\n device,\n 'NormalMappingRender',\n [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],\n normalMapWGSL,\n // Position, normal uv tangent bitangent\n ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],\n normalMapWGSL,\n presentationFormat,\n true\n );\n\n let currentSurfaceBindGroup = 0;\n const onChangeTexture = () => {\n currentSurfaceBindGroup = TextureAtlas[settings.Texture];\n };\n\n gui.add(settings, 'Bump Mode', [\n 'Diffuse Texture',\n 'Normal Texture',\n 'Depth Texture',\n 'Normal Map',\n 'Parallax Scale',\n 'Steep Parallax',\n ]);\n gui\n .add(settings, 'Texture', ['Spiral', 'Toybox', 'BrickWall'])\n .onChange(onChangeTexture);\n const lightFolder = gui.addFolder('Light');\n const depthFolder = gui.addFolder('Depth');\n lightFolder.add(settings, 'Reset Light').onChange(() => {\n lightPosXController.setValue(1.7);\n lightPosYController.setValue(0.7);\n lightPosZController.setValue(-1.9);\n lightIntensityController.setValue(0.02);\n });\n const lightPosXController = lightFolder\n .add(settings, 'lightPosX', -5, 5)\n .step(0.1);\n const lightPosYController = lightFolder\n .add(settings, 'lightPosY', -5, 5)\n .step(0.1);\n const lightPosZController = lightFolder\n .add(settings, 'lightPosZ', -5, 5)\n .step(0.1);\n const lightIntensityController = lightFolder\n .add(settings, 'lightIntensity', 0.0, 0.1)\n .step(0.002);\n depthFolder.add(settings, 'depthScale', 0.0, 0.1).step(0.01);\n depthFolder.add(settings, 'depthLayers', 1, 32).step(1);\n\n function frame() {\n if (!pageState.active) return;\n\n // Write to normal map shader\n const viewMatrix = getViewMatrix();\n\n const modelMatrix = getModelMatrix();\n\n const matrices = new Float32Array([\n ...projectionMatrix,\n ...viewMatrix,\n ...modelMatrix,\n ]);\n\n const mappingType = getMappingType();\n\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n matrices.buffer,\n matrices.byteOffset,\n matrices.byteLength\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 0,\n new Uint32Array([mappingType])\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 4,\n new Float32Array([\n settings.lightPosX,\n settings.lightPosY,\n settings.lightPosZ,\n settings.lightIntensity,\n settings.depthScale,\n settings.depthLayers,\n ])\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n // Draw textured Cube\n passEncoder.setPipeline(texturedCubePipeline);\n passEncoder.setBindGroup(0, frameBGDescriptor.bindGroups[0]);\n passEncoder.setBindGroup(\n 1,\n surfaceBGDescriptor.bindGroups[currentSurfaceBindGroup]\n );\n passEncoder.setVertexBuffer(0, box.vertexBuffer);\n passEncoder.setIndexBuffer(box.indexBuffer, 'uint16');\n passEncoder.drawIndexed(box.indexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst NormalMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Normal Mapping',\n description:\n 'This example demonstrates multiple different methods that employ 
fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './normalMap.wgsl',\n contents: normalMapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/box.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/box.ts').default,\n },\n {\n name: '../../meshes/mesh.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/mesh.ts').default,\n },\n {\n name: './utils.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./utils.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default NormalMapping;\n"},{name:"./normalMap.wgsl",contents:s,editable:!0},{name:"../../meshes/box.ts",contents:t(3583).Z},{name:"../../meshes/mesh.ts",contents:t(3150).Z},{name:"./utils.ts",contents:t(1146).Z}],filename:b});var S=T},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},3583:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\nimport { getMeshPosAtIndex, getMeshUVAtIndex, Mesh } from './mesh';\n\nexport interface BoxMesh extends Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n//// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/BoxGeometry.js\n//// Presumes vertex buffer alignment of verts, normals, and uvs\nconst createBoxGeometry = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n) => {\n widthSegments = Math.floor(widthSegments);\n heightSegments = Math.floor(heightSegments);\n depthSegments = Math.floor(depthSegments);\n\n const indices = [];\n const vertNormalUVBuffer = [];\n\n let numVertices = 0;\n\n const buildPlane = (\n u: 0 | 1 | 2,\n v: 0 | 1 | 2,\n w: 0 | 1 | 2,\n udir: -1 | 1,\n vdir: -1 | 1,\n planeWidth: number,\n planeHeight: number,\n planeDepth: number,\n xSections: number,\n ySections: number\n ) => {\n const segmentWidth = planeWidth / xSections;\n const segmentHeight = planeHeight / ySections;\n\n const widthHalf = planeWidth / 2;\n const heightHalf = planeHeight / 2;\n const depthHalf = planeDepth / 2;\n\n const gridX1 = xSections + 1;\n const gridY1 = ySections + 1;\n\n let vertexCounter = 0;\n\n const vertex = vec3.create();\n const normal = vec3.create();\n for (let iy = 0; iy < gridY1; iy++) {\n const y = iy * segmentHeight - heightHalf;\n\n for (let ix = 0; ix < gridX1; ix++) {\n const x = ix * segmentWidth - widthHalf;\n\n //Calculate plane vertices\n vertex[u] = x * udir;\n vertex[v] = y * vdir;\n vertex[w] = depthHalf;\n vertNormalUVBuffer.push(...vertex);\n\n //Caclulate normal\n normal[u] = 0;\n normal[v] = 0;\n normal[w] = planeDepth > 0 ? 
1.0 : -1.0;\n vertNormalUVBuffer.push(...normal);\n\n //Calculate uvs\n vertNormalUVBuffer.push(ix / xSections);\n vertNormalUVBuffer.push(1 - iy / ySections);\n\n vertexCounter += 1;\n }\n }\n\n for (let iy = 0; iy < ySections; iy++) {\n for (let ix = 0; ix < xSections; ix++) {\n const a = numVertices + ix + gridX1 * iy;\n const b = numVertices + ix + gridX1 * (iy + 1);\n const c = numVertices + (ix + 1) + gridX1 * (iy + 1);\n const d = numVertices + (ix + 1) + gridX1 * iy;\n\n //Push vertex indices\n //6 indices for each face\n indices.push(a, b, d);\n indices.push(b, c, d);\n\n numVertices += vertexCounter;\n }\n }\n };\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n -1,\n -1,\n depth,\n height,\n width,\n depthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n 1,\n -1,\n depth,\n height,\n -width,\n depthSegments,\n heightSegments\n );\n\n //Bottom face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n 1,\n width,\n depth,\n height,\n widthSegments,\n depthSegments\n );\n\n //Top face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n -1,\n width,\n depth,\n -height,\n widthSegments,\n depthSegments\n );\n\n //Side faces\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n 1,\n -1,\n width,\n height,\n depth,\n widthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n -1,\n -1,\n width,\n height,\n -depth,\n widthSegments,\n heightSegments\n );\n\n return {\n vertices: vertNormalUVBuffer,\n indices: indices,\n };\n};\n\ntype IndexFormat = 'uint16' | 'uint32';\n\n// Box mesh code ported from threejs, with addition of indexFormat specifier for vertex pulling\nexport const createBoxMesh = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0,\n indexFormat: IndexFormat = 'uint16'\n): Mesh => {\n const { vertices, indices } = createBoxGeometry(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const vertexStride = 8 * Float32Array.BYTES_PER_ELEMENT; //calculateVertexStride(vertexProperties);\n\n const indicesArray =\n indexFormat === 'uint16'\n ? 
new Uint16Array(indices)\n : new Uint32Array(indices);\n\n return {\n vertices: new Float32Array(vertices),\n indices: indicesArray,\n vertexStride: vertexStride,\n };\n};\n\nexport const createBoxMeshWithTangents = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n): Mesh => {\n const mesh = createBoxMesh(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const originalStrideElements =\n mesh.vertexStride / Float32Array.BYTES_PER_ELEMENT;\n\n const vertexCount = mesh.vertices.length / originalStrideElements;\n\n const tangents = new Array(vertexCount);\n const bitangents = new Array(vertexCount);\n const counts = new Array(vertexCount);\n for (let i = 0; i < vertexCount; i++) {\n tangents[i] = [0, 0, 0];\n bitangents[i] = [0, 0, 0];\n counts[i] = 0;\n }\n\n for (let i = 0; i < mesh.indices.length; i += 3) {\n const [idx1, idx2, idx3] = [\n mesh.indices[i],\n mesh.indices[i + 1],\n mesh.indices[i + 2],\n ];\n\n const [pos1, pos2, pos3] = [\n getMeshPosAtIndex(mesh, idx1),\n getMeshPosAtIndex(mesh, idx2),\n getMeshPosAtIndex(mesh, idx3),\n ];\n\n const [uv1, uv2, uv3] = [\n getMeshUVAtIndex(mesh, idx1),\n getMeshUVAtIndex(mesh, idx2),\n getMeshUVAtIndex(mesh, idx3),\n ];\n\n const edge1 = vec3.sub(pos2, pos1);\n const edge2 = vec3.sub(pos3, pos1);\n const deltaUV1 = vec3.sub(uv2, uv1);\n const deltaUV2 = vec3.sub(uv3, uv1);\n\n // Edge of a triangle moves in both u and v direction (2d)\n // deltaU * tangent vector + deltav * bitangent\n // Manipulating the data into matrices, we get an equation\n\n const constantVal =\n 1.0 / (deltaUV1[0] * deltaUV2[1] - deltaUV1[1] * deltaUV2[0]);\n\n const tangent = [\n constantVal * (deltaUV2[1] * edge1[0] - deltaUV1[1] * edge2[0]),\n constantVal * (deltaUV2[1] * edge1[1] - deltaUV1[1] * edge2[1]),\n constantVal * (deltaUV2[1] * edge1[2] - deltaUV1[1] * edge2[2]),\n ];\n\n const bitangent = [\n constantVal * (-deltaUV2[0] * edge1[0] + deltaUV1[0] * edge2[0]),\n constantVal * (-deltaUV2[0] * edge1[1] + deltaUV1[0] * edge2[1]),\n constantVal * (-deltaUV2[0] * edge1[2] + deltaUV1[0] * edge2[2]),\n ];\n\n //Accumulate tangents and bitangents\n tangents[idx1] = vec3.add(tangents[idx1], tangent);\n bitangents[idx1] = vec3.add(bitangents[idx1], bitangent);\n tangents[idx2] = vec3.add(tangents[idx2], tangent);\n bitangents[idx2] = vec3.add(bitangents[idx2], bitangent);\n tangents[idx3] = vec3.add(tangents[idx3], tangent);\n bitangents[idx3] = vec3.add(bitangents[idx3], bitangent);\n\n //Increment index count\n counts[idx1]++;\n counts[idx2]++;\n counts[idx3]++;\n }\n\n for (let i = 0; i < tangents.length; i++) {\n tangents[i] = vec3.divScalar(tangents[i], counts[i]);\n bitangents[i] = vec3.divScalar(bitangents[i], counts[i]);\n }\n\n const newStrideElements = 14;\n const wTangentArray = new Float32Array(vertexCount * newStrideElements);\n\n for (let i = 0; i < vertexCount; i++) {\n //Copy original vertex data (pos, normal uv)\n wTangentArray.set(\n //Get the original vertex [8 elements] (3 ele pos, 3 ele normal, 2 ele uv)\n mesh.vertices.subarray(\n i * originalStrideElements,\n (i + 1) * originalStrideElements\n ),\n //And put it at the proper location in the new array [14 bytes = 8 og + 6 empty]\n i * newStrideElements\n );\n //For each vertex, place tangent after originalStride\n wTangentArray.set(\n tangents[i],\n i * newStrideElements + originalStrideElements\n );\n //Place bitangent after 3 elements of tangent\n wTangentArray.set(\n bitangents[i],\n i 
* newStrideElements + originalStrideElements + 3\n );\n }\n\n return {\n vertices: wTangentArray,\n indices: mesh.indices,\n vertexStride: mesh.vertexStride + Float32Array.BYTES_PER_ELEMENT * 3 * 2,\n };\n};\n"},3150:function(e,n){"use strict";n.Z="import { vec3, vec2 } from 'wgpu-matrix';\n\n// Defines what to pass to pipeline to render mesh\nexport interface Renderable {\n vertexBuffer: GPUBuffer;\n indexBuffer: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nexport interface Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n/**\n * @param {GPUDevice} device - A valid GPUDevice.\n * @param {Mesh} mesh - An indexed triangle-list mesh, containing its vertices, indices, and vertexStride (number of elements per vertex).\n * @param {boolean} storeVertices - A boolean flag indicating whether the vertexBuffer should be available to use as a storage buffer.\n * @returns {boolean} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createMeshRenderable = (\n device: GPUDevice,\n mesh: Mesh,\n storeVertices = false,\n storeIndices = false\n): Renderable => {\n // Define buffer usage\n const vertexBufferUsage = storeVertices\n ? GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.VERTEX;\n const indexBufferUsage = storeIndices\n ? GPUBufferUsage.INDEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.INDEX;\n\n // Create vertex and index buffers\n const vertexBuffer = device.createBuffer({\n size: mesh.vertices.byteLength,\n usage: vertexBufferUsage,\n mappedAtCreation: true,\n });\n new Float32Array(vertexBuffer.getMappedRange()).set(mesh.vertices);\n vertexBuffer.unmap();\n\n const indexBuffer = device.createBuffer({\n size: mesh.indices.byteLength,\n usage: indexBufferUsage,\n mappedAtCreation: true,\n });\n\n // Determine whether index buffer is indices are in uint16 or uint32 format\n if (\n mesh.indices.byteLength ===\n mesh.indices.length * Uint16Array.BYTES_PER_ELEMENT\n ) {\n new Uint16Array(indexBuffer.getMappedRange()).set(mesh.indices);\n } else {\n new Uint32Array(indexBuffer.getMappedRange()).set(mesh.indices);\n }\n\n indexBuffer.unmap();\n\n return {\n vertexBuffer,\n indexBuffer,\n indexCount: mesh.indices.length,\n };\n};\n\nexport const getMeshPosAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 0,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshNormalAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 3 * Float32Array.BYTES_PER_ELEMENT,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshUVAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 6 * Float32Array.BYTES_PER_ELEMENT,\n 2\n );\n return vec2.fromValues(arr[0], arr[1]);\n};\n"},1146:function(e,n){"use strict";n.Z="type BindGroupBindingLayout =\n | GPUBufferBindingLayout\n | GPUTextureBindingLayout\n | GPUSamplerBindingLayout\n | GPUStorageTextureBindingLayout\n | GPUExternalTextureBindingLayout;\n\nexport type BindGroupsObjectsAndLayout = {\n bindGroups: GPUBindGroup[];\n bindGroupLayout: GPUBindGroupLayout;\n};\n\ntype ResourceTypeName =\n | 'buffer'\n | 'texture'\n | 'sampler'\n | 'externalTexture'\n | 'storageTexture';\n\n/**\n * @param {number[]} bindings - The binding value of each resource 
in the bind group.\n * @param {number[]} visibilities - The GPUShaderStage visibility of the resource at the corresponding index.\n * @param {ResourceTypeName[]} resourceTypes - The resourceType at the corresponding index.\n * @returns {BindGroupsObjectsAndLayout} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createBindGroupDescriptor = (\n bindings: number[],\n visibilities: number[],\n resourceTypes: ResourceTypeName[],\n resourceLayouts: BindGroupBindingLayout[],\n resources: GPUBindingResource[][],\n label: string,\n device: GPUDevice\n): BindGroupsObjectsAndLayout => {\n // Create layout of each entry within a bindGroup\n const layoutEntries: GPUBindGroupLayoutEntry[] = [];\n for (let i = 0; i < bindings.length; i++) {\n layoutEntries.push({\n binding: bindings[i],\n visibility: visibilities[i % visibilities.length],\n [resourceTypes[i]]: resourceLayouts[i],\n });\n }\n\n // Apply entry layouts to bindGroupLayout\n const bindGroupLayout = device.createBindGroupLayout({\n label: `${label}.bindGroupLayout`,\n entries: layoutEntries,\n });\n\n // Create bindGroups that conform to the layout\n const bindGroups: GPUBindGroup[] = [];\n for (let i = 0; i < resources.length; i++) {\n const groupEntries: GPUBindGroupEntry[] = [];\n for (let j = 0; j < resources[0].length; j++) {\n groupEntries.push({\n binding: j,\n resource: resources[i][j],\n });\n }\n const newBindGroup = device.createBindGroup({\n label: `${label}.bindGroup${i}`,\n layout: bindGroupLayout,\n entries: groupEntries,\n });\n bindGroups.push(newBindGroup);\n }\n\n return {\n bindGroups,\n bindGroupLayout,\n };\n};\n\nexport type ShaderKeyInterface = {\n [K in T[number]]: number;\n};\n\ninterface AttribAcc {\n attributes: GPUVertexAttribute[];\n arrayStride: number;\n}\n\n/**\n * @param {GPUVertexFormat} vf - A valid GPUVertexFormat, representing a per-vertex value that can be passed to the vertex shader.\n * @returns {number} The number of bytes present in the value to be passed.\n */\nexport const convertVertexFormatToBytes = (vf: GPUVertexFormat): number => {\n const splitFormat = vf.split('x');\n const bytesPerElement = parseInt(splitFormat[0].replace(/[^0-9]/g, '')) / 8;\n\n const bytesPerVec =\n bytesPerElement *\n (splitFormat[1] !== undefined ? 
parseInt(splitFormat[1]) : 1);\n\n return bytesPerVec;\n};\n\n/** Creates a GPUVertexBuffer Layout that maps to an interleaved vertex buffer.\n * @param {GPUVertexFormat[]} vertexFormats - An array of valid GPUVertexFormats.\n * @returns {GPUVertexBufferLayout} A GPUVertexBufferLayout representing an interleaved vertex buffer.\n */\nexport const createVBuffer = (\n vertexFormats: GPUVertexFormat[]\n): GPUVertexBufferLayout => {\n const initialValue: AttribAcc = { attributes: [], arrayStride: 0 };\n\n const vertexBuffer = vertexFormats.reduce(\n (acc: AttribAcc, curr: GPUVertexFormat, idx: number) => {\n const newAttribute: GPUVertexAttribute = {\n shaderLocation: idx,\n offset: acc.arrayStride,\n format: curr,\n };\n const nextOffset: number =\n acc.arrayStride + convertVertexFormatToBytes(curr);\n\n const retVal: AttribAcc = {\n attributes: [...acc.attributes, newAttribute],\n arrayStride: nextOffset,\n };\n return retVal;\n },\n initialValue\n );\n\n const layout: GPUVertexBufferLayout = {\n arrayStride: vertexBuffer.arrayStride,\n attributes: vertexBuffer.attributes,\n };\n\n return layout;\n};\n\nexport const create3DRenderPipeline = (\n device: GPUDevice,\n label: string,\n bgLayouts: GPUBindGroupLayout[],\n vertexShader: string,\n vBufferFormats: GPUVertexFormat[],\n fragmentShader: string,\n presentationFormat: GPUTextureFormat,\n depthTest = false,\n topology: GPUPrimitiveTopology = 'triangle-list',\n cullMode: GPUCullMode = 'back'\n) => {\n const pipelineDescriptor: GPURenderPipelineDescriptor = {\n label: `${label}.pipeline`,\n layout: device.createPipelineLayout({\n label: `${label}.pipelineLayout`,\n bindGroupLayouts: bgLayouts,\n }),\n vertex: {\n module: device.createShaderModule({\n label: `${label}.vertexShader`,\n code: vertexShader,\n }),\n entryPoint: 'vertexMain',\n buffers:\n vBufferFormats.length !== 0 ? 
[createVBuffer(vBufferFormats)] : [],\n },\n fragment: {\n module: device.createShaderModule({\n label: `${label}.fragmentShader`,\n code: fragmentShader,\n }),\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: topology,\n cullMode: cullMode,\n },\n };\n if (depthTest) {\n pipelineDescriptor.depthStencil = {\n depthCompare: 'less',\n depthWriteEnabled: true,\n format: 'depth24plus',\n };\n }\n return device.createRenderPipeline(pipelineDescriptor);\n};\n\nexport const createTextureFromImage = (\n device: GPUDevice,\n bitmap: ImageBitmap\n) => {\n const texture: GPUTexture = device.createTexture({\n size: [bitmap.width, bitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: bitmap },\n { texture: texture },\n [bitmap.width, bitmap.height]\n );\n return texture;\n};\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/118.be674f7ab6c92e48.js b/_next/static/chunks/118.be674f7ab6c92e48.js new file mode 100644 index 00000000..6b5a4171 --- /dev/null +++ b/_next/static/chunks/118.be674f7ab6c92e48.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[118],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),h=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?v(h[1]):v(a[0].name),c&&l.current)for(l.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something 
went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function p(e,n){if(!e)throw Error(n)}},7118:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return S}});var r,a,i=t(6416),o=t(5671),s="struct SpaceTransformUniforms {\n projMatrix: mat4x4f,\n viewMatrix: mat4x4f,\n modelMatrix: mat4x4f,\n}\n\nstruct Uniforms_MapInfo {\n mappingType: u32,\n lightPosX: f32,\n lightPosY: f32,\n lightPosZ: f32,\n lightIntensity: f32,\n depthScale: f32,\n depthLayers: f32,\n}\n\nstruct VertexInput {\n // Shader assumes the missing 4th float is 1.0\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f,\n @location(3) vert_tan: vec3f,\n @location(4) vert_bitan: vec3f,\n}\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n // Vertex position in world space\n @location(2) posWS: vec3f,\n // Vertex position in tangent space\n @location(3) posTS: vec3f,\n // View position in tangent space\n @location(4) viewTS: vec3f,\n // Extracted components of our tbn matrix\n @location(5) tbnTS0: vec3, \n @location(6) tbnTS1: vec3,\n @location(7) tbnTS2: vec3,\n}\n\n// Uniforms\n@group(0) @binding(0) var spaceTransform : SpaceTransformUniforms;\n@group(0) @binding(1) var mapInfo: Uniforms_MapInfo;\n\n// Texture info\n@group(1) @binding(0) var textureSampler: sampler;\n@group(1) @binding(1) var diffuseTexture: texture_2d;\n@group(1) @binding(2) var normalTexture: texture_2d;\n@group(1) @binding(3) var depthTexture: texture_2d;\n\nfn parallax_uv(\n uv: vec2f, \n viewDirTS: vec3f, \n depthSample: f32,\n depthScale: f32,\n) -> vec2f {\n if (mapInfo.mappingType == 4) {\n // Perturb uv coordinates based on depth and camera direction\n let p = viewDirTS.xy * (depthSample * depthScale) / viewDirTS.z;\n return uv - p;\n }\n // Break up depth space into layers\n let depthPerLayer = 1.0 / f32(mapInfo.depthLayers);\n // Start at lowest depth\n var currentDepth = 0.0;\n let delta_uv = viewDirTS.xy * depthScale / (viewDirTS.z * mapInfo.depthLayers);\n var prev_uv = uv;\n var cur_uv = uv;\n\n var depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n var prevDepthFromTexture = depthFromTexture;\n var prevCurrentDepth = currentDepth;\n for (var i: u32 = 0; i < 32; i++) {\n currentDepth += depthPerLayer;\n prev_uv = cur_uv;\n cur_uv -= delta_uv;\n depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n // Determine whether current depth is greater than depth map\n // Once we reach a certain threshold, we stop updating cur_uv\n cur_uv = select(cur_uv, prev_uv, depthFromTexture < currentDepth);\n prevDepthFromTexture = select(depthFromTexture, prevDepthFromTexture, prevDepthFromTexture < currentDepth);\n prevCurrentDepth = select(currentDepth, prevCurrentDepth, prevDepthFromTexture < currentDepth);\n }\n return cur_uv;\n}\n\nfn when_greater(v1: 
f32, v2: f32) -> f32 {\n return max(sign(v1 - v2), 0.0);\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n // Create the Model to View Matrix\n let MV = spaceTransform.viewMatrix * spaceTransform.modelMatrix;\n // Create the Model to View to Projection Matrix\n let MVP = spaceTransform.projMatrix * MV;\n \n // Get Clip space transforms and pass through values out of the way\n output.Position = MVP * input.position;\n output.uv = input.uv;\n output.normal = input.normal;\n\n // Multiply pos by modelMatrix to get the vertex/fragment's position in world space\n output.posWS = vec3f((spaceTransform.modelMatrix * input.position).xyz);\n \n var MV3x3 = mat3x3f(\n MV[0].xyz,\n MV[1].xyz,\n MV[2].xyz\n );\n\n // Get unit vectors of normal, tangent, and bitangents in model space\n let vertexTangent = normalize(input.vert_tan);\n let vertexBitangent = normalize(input.vert_bitan);\n let vertexNormal = normalize(input.normal);\n\n // Convert tbn unit vectors to mv space for a model view tbn\n var tbnTS = transpose(\n MV3x3 * mat3x3f(\n vertexTangent,\n vertexBitangent,\n vertexNormal\n )\n );\n // Condense to vec3s so they can be passed to fragment shader\n output.tbnTS0 = tbnTS[0];\n output.tbnTS1 = tbnTS[1];\n output.tbnTS2 = tbnTS[2];\n\n // Get the tangent space position of the vertex\n output.posTS = tbnTS * (MV * input.position).xyz;\n // Get the tangent space position of the camera view\n output.viewTS = tbnTS * vec3f(0.0, 0.0, 0.0);\n\n return output;\n}\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n // Reconstruct tbnTS\n let tbnTS = mat3x3f(\n input.tbnTS0,\n input.tbnTS1,\n input.tbnTS2,\n );\n\n // Get direction of view in tangent space\n let viewDirTS = normalize(input.viewTS - input.posTS);\n\n // Get position, direction, and distance of light in tangent space (no need to multiply by model matrix as there is no model)\n let lightPosVS = spaceTransform.viewMatrix * vec4f(mapInfo.lightPosX, mapInfo.lightPosY, mapInfo.lightPosZ, 1.0);\n let lightPosTS = tbnTS * lightPosVS.xyz;\n let lightDirTS = normalize(lightPosTS - input.posTS);\n let lightDistanceTS = distance(input.posTS, lightPosTS);\n\n let depthMap = textureSample(depthTexture, textureSampler, input.uv); \n\n let uv = select(\n parallax_uv(input.uv, viewDirTS, depthMap.r, mapInfo.depthScale),\n input.uv,\n mapInfo.mappingType < 4\n );\n\n // Get values from textures\n let diffuseMap = textureSample(diffuseTexture, textureSampler, uv);\n let normalMap = textureSample(normalTexture, textureSampler, uv);\n\n // Get normal in tangent space\n let normalTS = normalize((normalMap.xyz * 2.0) - 1.0);\n \n // Calculate diffusion lighting\n let lightColorIntensity = vec3f(255.0, 255.0, 255.0) * mapInfo.lightIntensity;\n //How similar is the normal to the lightDirection\n let diffuseStrength = clamp(\n dot(normalTS, lightDirTS), 0.0, 1.0\n );\n // Strenght inversely proportional to square of distance from light\n let diffuseLight = (lightColorIntensity * diffuseStrength) / (lightDistanceTS * lightDistanceTS);\n\n switch (mapInfo.mappingType) {\n // Output the diffuse texture\n case 0: {\n return vec4f(diffuseMap.rgb, 1.0);\n }\n // Output the normal map\n case 1: {\n return vec4f(normalMap.rgb, 1.0);\n }\n // Output the height map\n case 2: {\n return vec4f(depthMap.rgb, 1.0);\n }\n default: {\n return vec4f(diffuseMap.rgb * diffuseLight, 1.0);\n }\n }\n}";let l=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=arguments.length>3&&void 
0!==arguments[3]&&arguments[3],a=t?GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE:GPUBufferUsage.VERTEX,i=r?GPUBufferUsage.INDEX|GPUBufferUsage.STORAGE:GPUBufferUsage.INDEX,o=e.createBuffer({size:n.vertices.byteLength,usage:a,mappedAtCreation:!0});new Float32Array(o.getMappedRange()).set(n.vertices),o.unmap();let s=e.createBuffer({size:n.indices.byteLength,usage:i,mappedAtCreation:!0});return n.indices.byteLength===n.indices.length*Uint16Array.BYTES_PER_ELEMENT?new Uint16Array(s.getMappedRange()).set(n.indices):new Uint32Array(s.getMappedRange()).set(n.indices),s.unmap(),{vertexBuffer:o,indexBuffer:s,indexCount:n.indices.length}},u=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+0,3);return i.R3.fromValues(t[0],t[1],t[2])},c=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+6*Float32Array.BYTES_PER_ELEMENT,2);return i.K4.fromValues(t[0],t[1])},d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1;r=Math.floor(r),a=Math.floor(a),o=Math.floor(o);let s=[],l=[],u=0,c=(e,n,t,r,a,o,c,d,p,m)=>{let h=o/p,g=c/m,f=o/2,x=c/2,v=d/2,b=p+1,y=m+1,T=0,S=i.R3.create(),w=i.R3.create();for(let P=0;P0?1:-1,l.push(...w),l.push(G/p),l.push(1-P/m),T+=1}}for(let M=0;M0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,o=arguments.length>6&&void 0!==arguments[6]?arguments[6]:"uint16",{vertices:s,indices:l}=d(e,n,t,r,a,i),u=8*Float32Array.BYTES_PER_ELEMENT,c="uint16"===o?new Uint16Array(l):new Uint32Array(l);return{vertices:new Float32Array(s),indices:c,vertexStride:u}},m=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,s=p(e,n,t,r,a,o),l=s.vertexStride/Float32Array.BYTES_PER_ELEMENT,d=s.vertices.length/l,m=Array(d),h=Array(d),g=Array(d);for(let f=0;f{let s=[];for(let l=0;l{let n=e.split("x"),t=parseInt(n[0].replace(/[^0-9]/g,""))/8,r=t*(void 0!==n[1]?parseInt(n[1]):1);return r},f=e=>{let n=e.reduce((e,n,t)=>{let r={shaderLocation:t,offset:e.arrayStride,format:n},a=e.arrayStride+g(n),i={attributes:[...e.attributes,r],arrayStride:a};return i},{attributes:[],arrayStride:0}),t={arrayStride:n.arrayStride,attributes:n.attributes};return t},x=function(e,n,t,r,a,i,o){let s=arguments.length>7&&void 0!==arguments[7]&&arguments[7],l=arguments.length>8&&void 0!==arguments[8]?arguments[8]:"triangle-list",u=arguments.length>9&&void 
0!==arguments[9]?arguments[9]:"back",c={label:"".concat(n,".pipeline"),layout:e.createPipelineLayout({label:"".concat(n,".pipelineLayout"),bindGroupLayouts:t}),vertex:{module:e.createShaderModule({label:"".concat(n,".vertexShader"),code:r}),entryPoint:"vertexMain",buffers:0!==a.length?[f(a)]:[]},fragment:{module:e.createShaderModule({label:"".concat(n,".fragmentShader"),code:i}),entryPoint:"fragmentMain",targets:[{format:o}]},primitive:{topology:l,cullMode:u}};return s&&(c.depthStencil={depthCompare:"less",depthWriteEnabled:!0,format:"depth24plus"}),e.createRenderPipeline(c)},v=(e,n)=>{let t=e.createTexture({size:[n.width,n.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});return e.queue.copyExternalImageToTexture({source:n},{texture:t},[n.width,n.height]),t};var b="src/sample/normalMap/main.ts";(r=a||(a={}))[r.Spiral=0]="Spiral",r[r.Toybox=1]="Toybox",r[r.BrickWall=2]="BrickWall";let y=async e=>{let n,t,r,o,u,c,d,p,{canvas:g,pageState:f,gui:b}=e,y=await navigator.gpu.requestAdapter(),T=await y.requestDevice();if(!f.active)return;let S=g.getContext("webgpu"),w=window.devicePixelRatio;g.width=g.clientWidth*w,g.height=g.clientHeight*w;let P=navigator.gpu.getPreferredCanvasFormat();S.configure({device:T,format:P,alphaMode:"premultiplied"});let B={"Bump Mode":"Normal Map",cameraPosX:0,cameraPosY:.8,cameraPosZ:-1.4,lightPosX:1.7,lightPosY:.7,lightPosZ:-1.9,lightIntensity:.02,depthScale:.05,depthLayers:16,Texture:"Spiral","Reset Light"(){}},G=T.createTexture({size:[g.width,g.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),U=T.createBuffer({size:256,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),M=T.createBuffer({size:7*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let E=await fetch("../assets/img/wood_diffuse.png"),V=await createImageBitmap(await E.blob());n=v(T,V)}{let A=await fetch("../assets/img/spiral_normal.png"),F=await createImageBitmap(await A.blob());t=v(T,F)}{let _=await fetch("../assets/img/spiral_height.png"),D=await createImageBitmap(await _.blob());r=v(T,D)}{let R=await fetch("../assets/img/toybox_normal.png"),I=await createImageBitmap(await R.blob());o=v(T,I)}{let C=await fetch("../assets/img/toybox_height.png"),L=await createImageBitmap(await C.blob());u=v(T,L)}{let N=await fetch("../assets/img/brickwall_diffuse.png"),j=await createImageBitmap(await N.blob());c=v(T,j)}{let Y=await fetch("../assets/img/brickwall_normal.png"),O=await createImageBitmap(await Y.blob());d=v(T,O)}{let X=await fetch("../assets/img/brickwall_height.png"),z=await createImageBitmap(await X.blob());p=v(T,z)}let k=T.createSampler({magFilter:"linear",minFilter:"linear"}),H={colorAttachments:[{view:void 
0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:G.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},q=l(T,m(1,1,1)),W=h([0,1],[GPUShaderStage.VERTEX|GPUShaderStage.FRAGMENT,GPUShaderStage.FRAGMENT|GPUShaderStage.VERTEX],["buffer","buffer"],[{type:"uniform"},{type:"uniform"}],[[{buffer:U},{buffer:M}]],"Frame",T),Z=h([0,1,2,3],[GPUShaderStage.FRAGMENT],["sampler","texture","texture","texture"],[{type:"filtering"},{sampleType:"float"},{sampleType:"float"},{sampleType:"float"}],[[k,n.createView(),t.createView(),r.createView()],[k,n.createView(),o.createView(),u.createView()],[k,c.createView(),d.createView(),p.createView()]],"Surface",T),$=g.width/g.height,K=i._E.perspective(2*Math.PI/5,$,.1,10),J=()=>{switch(B["Bump Mode"]){case"Diffuse Texture":return 0;case"Normal Texture":return 1;case"Depth Texture":return 2;case"Normal Map":return 3;case"Parallax Scale":return 4;case"Steep Parallax":return 5}},Q=x(T,"NormalMappingRender",[W.bindGroupLayout,Z.bindGroupLayout],s,["float32x3","float32x3","float32x2","float32x3","float32x3"],s,P,!0),ee=0,en=()=>{ee=a[B.Texture]};b.add(B,"Bump Mode",["Diffuse Texture","Normal Texture","Depth Texture","Normal Map","Parallax Scale","Steep Parallax"]),b.add(B,"Texture",["Spiral","Toybox","BrickWall"]).onChange(en);let et=b.addFolder("Light"),er=b.addFolder("Depth");et.add(B,"Reset Light").onChange(()=>{ea.setValue(1.7),ei.setValue(.7),eo.setValue(-1.9),es.setValue(.02)});let ea=et.add(B,"lightPosX",-5,5).step(.1),ei=et.add(B,"lightPosY",-5,5).step(.1),eo=et.add(B,"lightPosZ",-5,5).step(.1),es=et.add(B,"lightIntensity",0,.1).step(.002);er.add(B,"depthScale",0,.1).step(.01),er.add(B,"depthLayers",1,32).step(1),requestAnimationFrame(function e(){if(!f.active)return;let n=i._E.lookAt([B.cameraPosX,B.cameraPosY,B.cameraPosZ],[0,0,0],[0,1,0]),t=function(){let e=i._E.create();i._E.identity(e);let n=Date.now()/1e3;return i._E.rotateY(e,-.5*n,e),e}(),r=new Float32Array([...K,...n,...t]),a=J();T.queue.writeBuffer(U,0,r.buffer,r.byteOffset,r.byteLength),T.queue.writeBuffer(M,0,new Uint32Array([a])),T.queue.writeBuffer(M,4,new Float32Array([B.lightPosX,B.lightPosY,B.lightPosZ,B.lightIntensity,B.depthScale,B.depthLayers])),H.colorAttachments[0].view=S.getCurrentTexture().createView();let o=T.createCommandEncoder(),s=o.beginRenderPass(H);s.setPipeline(Q),s.setBindGroup(0,W.bindGroups[0]),s.setBindGroup(1,Z.bindGroups[ee]),s.setVertexBuffer(0,q.vertexBuffer),s.setIndexBuffer(q.indexBuffer,"uint16"),s.drawIndexed(q.indexCount),s.end(),T.queue.submit([o.finish()]),requestAnimationFrame(e)})},T=()=>(0,o.Tl)({name:"Normal Mapping",description:"This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. 
Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.",gui:!0,init:y,sources:[{name:b.substring(21),contents:"import { mat4 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport normalMapWGSL from './normalMap.wgsl';\nimport { createMeshRenderable } from '../../meshes/mesh';\nimport { createBoxMeshWithTangents } from '../../meshes/box';\nimport {\n createBindGroupDescriptor,\n create3DRenderPipeline,\n createTextureFromImage,\n} from './utils';\n\nconst MAT4X4_BYTES = 64;\nenum TextureAtlas {\n Spiral,\n Toybox,\n BrickWall,\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n interface GUISettings {\n 'Bump Mode':\n | 'Diffuse Texture'\n | 'Normal Texture'\n | 'Depth Texture'\n | 'Normal Map'\n | 'Parallax Scale'\n | 'Steep Parallax';\n cameraPosX: number;\n cameraPosY: number;\n cameraPosZ: number;\n lightPosX: number;\n lightPosY: number;\n lightPosZ: number;\n lightIntensity: number;\n depthScale: number;\n depthLayers: number;\n Texture: string;\n 'Reset Light': () => void;\n }\n\n const settings: GUISettings = {\n 'Bump Mode': 'Normal Map',\n cameraPosX: 0.0,\n cameraPosY: 0.8,\n cameraPosZ: -1.4,\n lightPosX: 1.7,\n lightPosY: 0.7,\n lightPosZ: -1.9,\n lightIntensity: 0.02,\n depthScale: 0.05,\n depthLayers: 16,\n Texture: 'Spiral',\n 'Reset Light': () => {\n return;\n },\n };\n\n // Create normal mapping resources and pipeline\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBuffer = device.createBuffer({\n // Buffer holding projection, view, and model matrices plus padding bytes\n size: MAT4X4_BYTES * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const mapMethodBuffer = device.createBuffer({\n // Buffer holding mapping type, light uniforms, and depth uniforms\n size: Float32Array.BYTES_PER_ELEMENT * 7,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let woodDiffuseTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/wood_diffuse.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n woodDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralNormalTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/spiral_normal.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralHeightTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/spiral_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxNormalTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/toybox_normal.png');\n const imageBitmap 
= await createImageBitmap(await response.blob());\n toyboxNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxHeightTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/toybox_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n toyboxHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallDiffuseTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/brickwall_diffuse.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallNormalTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/brickwall_normal.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallHeightTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/brickwall_height.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const box = createMeshRenderable(\n device,\n createBoxMeshWithTangents(1.0, 1.0, 1.0)\n );\n\n // Uniform bindGroups and bindGroupLayout\n const frameBGDescriptor = createBindGroupDescriptor(\n [0, 1],\n [\n GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,\n ],\n ['buffer', 'buffer'],\n [{ type: 'uniform' }, { type: 'uniform' }],\n [[{ buffer: uniformBuffer }, { buffer: mapMethodBuffer }]],\n 'Frame',\n device\n );\n\n // Texture bindGroups and bindGroupLayout\n const surfaceBGDescriptor = createBindGroupDescriptor(\n [0, 1, 2, 3],\n [GPUShaderStage.FRAGMENT],\n ['sampler', 'texture', 'texture', 'texture'],\n [\n { type: 'filtering' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n ],\n // Multiple bindgroups that accord to the layout defined above\n [\n [\n sampler,\n woodDiffuseTexture.createView(),\n spiralNormalTexture.createView(),\n spiralHeightTexture.createView(),\n ],\n [\n sampler,\n woodDiffuseTexture.createView(),\n toyboxNormalTexture.createView(),\n toyboxHeightTexture.createView(),\n ],\n [\n sampler,\n brickwallDiffuseTexture.createView(),\n brickwallNormalTexture.createView(),\n brickwallHeightTexture.createView(),\n ],\n ],\n 'Surface',\n device\n );\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 0.1,\n 10.0\n ) as Float32Array;\n\n function getViewMatrix() {\n return mat4.lookAt(\n [settings.cameraPosX, settings.cameraPosY, settings.cameraPosZ],\n [0, 0, 0],\n [0, 1, 0]\n );\n }\n\n function getModelMatrix() {\n const modelMatrix = mat4.create();\n mat4.identity(modelMatrix);\n const now = Date.now() / 1000;\n mat4.rotateY(modelMatrix, now * -0.5, modelMatrix);\n return modelMatrix;\n }\n\n // Change the model mapping type\n 
const getMappingType = (): number => {\n switch (settings['Bump Mode']) {\n case 'Diffuse Texture':\n return 0;\n case 'Normal Texture':\n return 1;\n case 'Depth Texture':\n return 2;\n case 'Normal Map':\n return 3;\n case 'Parallax Scale':\n return 4;\n case 'Steep Parallax':\n return 5;\n }\n };\n\n const texturedCubePipeline = create3DRenderPipeline(\n device,\n 'NormalMappingRender',\n [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],\n normalMapWGSL,\n // Position, normal uv tangent bitangent\n ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],\n normalMapWGSL,\n presentationFormat,\n true\n );\n\n let currentSurfaceBindGroup = 0;\n const onChangeTexture = () => {\n currentSurfaceBindGroup = TextureAtlas[settings.Texture];\n };\n\n gui.add(settings, 'Bump Mode', [\n 'Diffuse Texture',\n 'Normal Texture',\n 'Depth Texture',\n 'Normal Map',\n 'Parallax Scale',\n 'Steep Parallax',\n ]);\n gui\n .add(settings, 'Texture', ['Spiral', 'Toybox', 'BrickWall'])\n .onChange(onChangeTexture);\n const lightFolder = gui.addFolder('Light');\n const depthFolder = gui.addFolder('Depth');\n lightFolder.add(settings, 'Reset Light').onChange(() => {\n lightPosXController.setValue(1.7);\n lightPosYController.setValue(0.7);\n lightPosZController.setValue(-1.9);\n lightIntensityController.setValue(0.02);\n });\n const lightPosXController = lightFolder\n .add(settings, 'lightPosX', -5, 5)\n .step(0.1);\n const lightPosYController = lightFolder\n .add(settings, 'lightPosY', -5, 5)\n .step(0.1);\n const lightPosZController = lightFolder\n .add(settings, 'lightPosZ', -5, 5)\n .step(0.1);\n const lightIntensityController = lightFolder\n .add(settings, 'lightIntensity', 0.0, 0.1)\n .step(0.002);\n depthFolder.add(settings, 'depthScale', 0.0, 0.1).step(0.01);\n depthFolder.add(settings, 'depthLayers', 1, 32).step(1);\n\n function frame() {\n if (!pageState.active) return;\n\n // Write to normal map shader\n const viewMatrix = getViewMatrix();\n\n const modelMatrix = getModelMatrix();\n\n const matrices = new Float32Array([\n ...projectionMatrix,\n ...viewMatrix,\n ...modelMatrix,\n ]);\n\n const mappingType = getMappingType();\n\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n matrices.buffer,\n matrices.byteOffset,\n matrices.byteLength\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 0,\n new Uint32Array([mappingType])\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 4,\n new Float32Array([\n settings.lightPosX,\n settings.lightPosY,\n settings.lightPosZ,\n settings.lightIntensity,\n settings.depthScale,\n settings.depthLayers,\n ])\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n // Draw textured Cube\n passEncoder.setPipeline(texturedCubePipeline);\n passEncoder.setBindGroup(0, frameBGDescriptor.bindGroups[0]);\n passEncoder.setBindGroup(\n 1,\n surfaceBGDescriptor.bindGroups[currentSurfaceBindGroup]\n );\n passEncoder.setVertexBuffer(0, box.vertexBuffer);\n passEncoder.setIndexBuffer(box.indexBuffer, 'uint16');\n passEncoder.drawIndexed(box.indexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst NormalMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Normal Mapping',\n description:\n 'This example demonstrates multiple different methods that 
employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './normalMap.wgsl',\n contents: normalMapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/box.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/box.ts').default,\n },\n {\n name: '../../meshes/mesh.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/mesh.ts').default,\n },\n {\n name: './utils.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./utils.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default NormalMapping;\n"},{name:"./normalMap.wgsl",contents:s,editable:!0},{name:"../../meshes/box.ts",contents:t(3583).Z},{name:"../../meshes/mesh.ts",contents:t(3150).Z},{name:"./utils.ts",contents:t(1146).Z}],filename:b});var S=T},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},3583:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\nimport { getMeshPosAtIndex, getMeshUVAtIndex, Mesh } from './mesh';\n\nexport interface BoxMesh extends Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n//// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/BoxGeometry.js\n//// Presumes vertex buffer alignment of verts, normals, and uvs\nconst createBoxGeometry = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n) => {\n widthSegments = Math.floor(widthSegments);\n heightSegments = Math.floor(heightSegments);\n depthSegments = Math.floor(depthSegments);\n\n const indices = [];\n const vertNormalUVBuffer = [];\n\n let numVertices = 0;\n\n const buildPlane = (\n u: 0 | 1 | 2,\n v: 0 | 1 | 2,\n w: 0 | 1 | 2,\n udir: -1 | 1,\n vdir: -1 | 1,\n planeWidth: number,\n planeHeight: number,\n planeDepth: number,\n xSections: number,\n ySections: number\n ) => {\n const segmentWidth = planeWidth / xSections;\n const segmentHeight = planeHeight / ySections;\n\n const widthHalf = planeWidth / 2;\n const heightHalf = planeHeight / 2;\n const depthHalf = planeDepth / 2;\n\n const gridX1 = xSections + 1;\n const gridY1 = ySections + 1;\n\n let vertexCounter = 0;\n\n const vertex = vec3.create();\n const normal = vec3.create();\n for (let iy = 0; iy < gridY1; iy++) {\n const y = iy * segmentHeight - heightHalf;\n\n for (let ix = 0; ix < gridX1; ix++) {\n const x = ix * segmentWidth - widthHalf;\n\n //Calculate plane vertices\n vertex[u] = x * udir;\n vertex[v] = y * vdir;\n vertex[w] = depthHalf;\n vertNormalUVBuffer.push(...vertex);\n\n //Caclulate normal\n normal[u] = 0;\n normal[v] = 0;\n normal[w] = planeDepth > 0 ? 
1.0 : -1.0;\n vertNormalUVBuffer.push(...normal);\n\n //Calculate uvs\n vertNormalUVBuffer.push(ix / xSections);\n vertNormalUVBuffer.push(1 - iy / ySections);\n\n vertexCounter += 1;\n }\n }\n\n for (let iy = 0; iy < ySections; iy++) {\n for (let ix = 0; ix < xSections; ix++) {\n const a = numVertices + ix + gridX1 * iy;\n const b = numVertices + ix + gridX1 * (iy + 1);\n const c = numVertices + (ix + 1) + gridX1 * (iy + 1);\n const d = numVertices + (ix + 1) + gridX1 * iy;\n\n //Push vertex indices\n //6 indices for each face\n indices.push(a, b, d);\n indices.push(b, c, d);\n\n numVertices += vertexCounter;\n }\n }\n };\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n -1,\n -1,\n depth,\n height,\n width,\n depthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n 1,\n -1,\n depth,\n height,\n -width,\n depthSegments,\n heightSegments\n );\n\n //Bottom face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n 1,\n width,\n depth,\n height,\n widthSegments,\n depthSegments\n );\n\n //Top face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n -1,\n width,\n depth,\n -height,\n widthSegments,\n depthSegments\n );\n\n //Side faces\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n 1,\n -1,\n width,\n height,\n depth,\n widthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n -1,\n -1,\n width,\n height,\n -depth,\n widthSegments,\n heightSegments\n );\n\n return {\n vertices: vertNormalUVBuffer,\n indices: indices,\n };\n};\n\ntype IndexFormat = 'uint16' | 'uint32';\n\n// Box mesh code ported from threejs, with addition of indexFormat specifier for vertex pulling\nexport const createBoxMesh = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0,\n indexFormat: IndexFormat = 'uint16'\n): Mesh => {\n const { vertices, indices } = createBoxGeometry(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const vertexStride = 8 * Float32Array.BYTES_PER_ELEMENT; //calculateVertexStride(vertexProperties);\n\n const indicesArray =\n indexFormat === 'uint16'\n ? 
new Uint16Array(indices)\n : new Uint32Array(indices);\n\n return {\n vertices: new Float32Array(vertices),\n indices: indicesArray,\n vertexStride: vertexStride,\n };\n};\n\nexport const createBoxMeshWithTangents = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n): Mesh => {\n const mesh = createBoxMesh(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const originalStrideElements =\n mesh.vertexStride / Float32Array.BYTES_PER_ELEMENT;\n\n const vertexCount = mesh.vertices.length / originalStrideElements;\n\n const tangents = new Array(vertexCount);\n const bitangents = new Array(vertexCount);\n const counts = new Array(vertexCount);\n for (let i = 0; i < vertexCount; i++) {\n tangents[i] = [0, 0, 0];\n bitangents[i] = [0, 0, 0];\n counts[i] = 0;\n }\n\n for (let i = 0; i < mesh.indices.length; i += 3) {\n const [idx1, idx2, idx3] = [\n mesh.indices[i],\n mesh.indices[i + 1],\n mesh.indices[i + 2],\n ];\n\n const [pos1, pos2, pos3] = [\n getMeshPosAtIndex(mesh, idx1),\n getMeshPosAtIndex(mesh, idx2),\n getMeshPosAtIndex(mesh, idx3),\n ];\n\n const [uv1, uv2, uv3] = [\n getMeshUVAtIndex(mesh, idx1),\n getMeshUVAtIndex(mesh, idx2),\n getMeshUVAtIndex(mesh, idx3),\n ];\n\n const edge1 = vec3.sub(pos2, pos1);\n const edge2 = vec3.sub(pos3, pos1);\n const deltaUV1 = vec3.sub(uv2, uv1);\n const deltaUV2 = vec3.sub(uv3, uv1);\n\n // Edge of a triangle moves in both u and v direction (2d)\n // deltaU * tangent vector + deltav * bitangent\n // Manipulating the data into matrices, we get an equation\n\n const constantVal =\n 1.0 / (deltaUV1[0] * deltaUV2[1] - deltaUV1[1] * deltaUV2[0]);\n\n const tangent = [\n constantVal * (deltaUV2[1] * edge1[0] - deltaUV1[1] * edge2[0]),\n constantVal * (deltaUV2[1] * edge1[1] - deltaUV1[1] * edge2[1]),\n constantVal * (deltaUV2[1] * edge1[2] - deltaUV1[1] * edge2[2]),\n ];\n\n const bitangent = [\n constantVal * (-deltaUV2[0] * edge1[0] + deltaUV1[0] * edge2[0]),\n constantVal * (-deltaUV2[0] * edge1[1] + deltaUV1[0] * edge2[1]),\n constantVal * (-deltaUV2[0] * edge1[2] + deltaUV1[0] * edge2[2]),\n ];\n\n //Accumulate tangents and bitangents\n tangents[idx1] = vec3.add(tangents[idx1], tangent);\n bitangents[idx1] = vec3.add(bitangents[idx1], bitangent);\n tangents[idx2] = vec3.add(tangents[idx2], tangent);\n bitangents[idx2] = vec3.add(bitangents[idx2], bitangent);\n tangents[idx3] = vec3.add(tangents[idx3], tangent);\n bitangents[idx3] = vec3.add(bitangents[idx3], bitangent);\n\n //Increment index count\n counts[idx1]++;\n counts[idx2]++;\n counts[idx3]++;\n }\n\n for (let i = 0; i < tangents.length; i++) {\n tangents[i] = vec3.divScalar(tangents[i], counts[i]);\n bitangents[i] = vec3.divScalar(bitangents[i], counts[i]);\n }\n\n const newStrideElements = 14;\n const wTangentArray = new Float32Array(vertexCount * newStrideElements);\n\n for (let i = 0; i < vertexCount; i++) {\n //Copy original vertex data (pos, normal uv)\n wTangentArray.set(\n //Get the original vertex [8 elements] (3 ele pos, 3 ele normal, 2 ele uv)\n mesh.vertices.subarray(\n i * originalStrideElements,\n (i + 1) * originalStrideElements\n ),\n //And put it at the proper location in the new array [14 bytes = 8 og + 6 empty]\n i * newStrideElements\n );\n //For each vertex, place tangent after originalStride\n wTangentArray.set(\n tangents[i],\n i * newStrideElements + originalStrideElements\n );\n //Place bitangent after 3 elements of tangent\n wTangentArray.set(\n bitangents[i],\n i 
* newStrideElements + originalStrideElements + 3\n );\n }\n\n return {\n vertices: wTangentArray,\n indices: mesh.indices,\n vertexStride: mesh.vertexStride + Float32Array.BYTES_PER_ELEMENT * 3 * 2,\n };\n};\n"},3150:function(e,n){"use strict";n.Z="import { vec3, vec2 } from 'wgpu-matrix';\n\n// Defines what to pass to pipeline to render mesh\nexport interface Renderable {\n vertexBuffer: GPUBuffer;\n indexBuffer: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nexport interface Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n/**\n * @param {GPUDevice} device - A valid GPUDevice.\n * @param {Mesh} mesh - An indexed triangle-list mesh, containing its vertices, indices, and vertexStride (number of elements per vertex).\n * @param {boolean} storeVertices - A boolean flag indicating whether the vertexBuffer should be available to use as a storage buffer.\n * @returns {boolean} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createMeshRenderable = (\n device: GPUDevice,\n mesh: Mesh,\n storeVertices = false,\n storeIndices = false\n): Renderable => {\n // Define buffer usage\n const vertexBufferUsage = storeVertices\n ? GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.VERTEX;\n const indexBufferUsage = storeIndices\n ? GPUBufferUsage.INDEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.INDEX;\n\n // Create vertex and index buffers\n const vertexBuffer = device.createBuffer({\n size: mesh.vertices.byteLength,\n usage: vertexBufferUsage,\n mappedAtCreation: true,\n });\n new Float32Array(vertexBuffer.getMappedRange()).set(mesh.vertices);\n vertexBuffer.unmap();\n\n const indexBuffer = device.createBuffer({\n size: mesh.indices.byteLength,\n usage: indexBufferUsage,\n mappedAtCreation: true,\n });\n\n // Determine whether index buffer is indices are in uint16 or uint32 format\n if (\n mesh.indices.byteLength ===\n mesh.indices.length * Uint16Array.BYTES_PER_ELEMENT\n ) {\n new Uint16Array(indexBuffer.getMappedRange()).set(mesh.indices);\n } else {\n new Uint32Array(indexBuffer.getMappedRange()).set(mesh.indices);\n }\n\n indexBuffer.unmap();\n\n return {\n vertexBuffer,\n indexBuffer,\n indexCount: mesh.indices.length,\n };\n};\n\nexport const getMeshPosAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 0,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshNormalAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 3 * Float32Array.BYTES_PER_ELEMENT,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshUVAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 6 * Float32Array.BYTES_PER_ELEMENT,\n 2\n );\n return vec2.fromValues(arr[0], arr[1]);\n};\n"},1146:function(e,n){"use strict";n.Z="type BindGroupBindingLayout =\n | GPUBufferBindingLayout\n | GPUTextureBindingLayout\n | GPUSamplerBindingLayout\n | GPUStorageTextureBindingLayout\n | GPUExternalTextureBindingLayout;\n\nexport type BindGroupsObjectsAndLayout = {\n bindGroups: GPUBindGroup[];\n bindGroupLayout: GPUBindGroupLayout;\n};\n\ntype ResourceTypeName =\n | 'buffer'\n | 'texture'\n | 'sampler'\n | 'externalTexture'\n | 'storageTexture';\n\n/**\n * @param {number[]} bindings - The binding value of each resource 
in the bind group.\n * @param {number[]} visibilities - The GPUShaderStage visibility of the resource at the corresponding index.\n * @param {ResourceTypeName[]} resourceTypes - The resourceType at the corresponding index.\n * @returns {BindGroupsObjectsAndLayout} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createBindGroupDescriptor = (\n bindings: number[],\n visibilities: number[],\n resourceTypes: ResourceTypeName[],\n resourceLayouts: BindGroupBindingLayout[],\n resources: GPUBindingResource[][],\n label: string,\n device: GPUDevice\n): BindGroupsObjectsAndLayout => {\n // Create layout of each entry within a bindGroup\n const layoutEntries: GPUBindGroupLayoutEntry[] = [];\n for (let i = 0; i < bindings.length; i++) {\n layoutEntries.push({\n binding: bindings[i],\n visibility: visibilities[i % visibilities.length],\n [resourceTypes[i]]: resourceLayouts[i],\n });\n }\n\n // Apply entry layouts to bindGroupLayout\n const bindGroupLayout = device.createBindGroupLayout({\n label: `${label}.bindGroupLayout`,\n entries: layoutEntries,\n });\n\n // Create bindGroups that conform to the layout\n const bindGroups: GPUBindGroup[] = [];\n for (let i = 0; i < resources.length; i++) {\n const groupEntries: GPUBindGroupEntry[] = [];\n for (let j = 0; j < resources[0].length; j++) {\n groupEntries.push({\n binding: j,\n resource: resources[i][j],\n });\n }\n const newBindGroup = device.createBindGroup({\n label: `${label}.bindGroup${i}`,\n layout: bindGroupLayout,\n entries: groupEntries,\n });\n bindGroups.push(newBindGroup);\n }\n\n return {\n bindGroups,\n bindGroupLayout,\n };\n};\n\nexport type ShaderKeyInterface = {\n [K in T[number]]: number;\n};\n\ninterface AttribAcc {\n attributes: GPUVertexAttribute[];\n arrayStride: number;\n}\n\n/**\n * @param {GPUVertexFormat} vf - A valid GPUVertexFormat, representing a per-vertex value that can be passed to the vertex shader.\n * @returns {number} The number of bytes present in the value to be passed.\n */\nexport const convertVertexFormatToBytes = (vf: GPUVertexFormat): number => {\n const splitFormat = vf.split('x');\n const bytesPerElement = parseInt(splitFormat[0].replace(/[^0-9]/g, '')) / 8;\n\n const bytesPerVec =\n bytesPerElement *\n (splitFormat[1] !== undefined ? 
parseInt(splitFormat[1]) : 1);\n\n return bytesPerVec;\n};\n\n/** Creates a GPUVertexBuffer Layout that maps to an interleaved vertex buffer.\n * @param {GPUVertexFormat[]} vertexFormats - An array of valid GPUVertexFormats.\n * @returns {GPUVertexBufferLayout} A GPUVertexBufferLayout representing an interleaved vertex buffer.\n */\nexport const createVBuffer = (\n vertexFormats: GPUVertexFormat[]\n): GPUVertexBufferLayout => {\n const initialValue: AttribAcc = { attributes: [], arrayStride: 0 };\n\n const vertexBuffer = vertexFormats.reduce(\n (acc: AttribAcc, curr: GPUVertexFormat, idx: number) => {\n const newAttribute: GPUVertexAttribute = {\n shaderLocation: idx,\n offset: acc.arrayStride,\n format: curr,\n };\n const nextOffset: number =\n acc.arrayStride + convertVertexFormatToBytes(curr);\n\n const retVal: AttribAcc = {\n attributes: [...acc.attributes, newAttribute],\n arrayStride: nextOffset,\n };\n return retVal;\n },\n initialValue\n );\n\n const layout: GPUVertexBufferLayout = {\n arrayStride: vertexBuffer.arrayStride,\n attributes: vertexBuffer.attributes,\n };\n\n return layout;\n};\n\nexport const create3DRenderPipeline = (\n device: GPUDevice,\n label: string,\n bgLayouts: GPUBindGroupLayout[],\n vertexShader: string,\n vBufferFormats: GPUVertexFormat[],\n fragmentShader: string,\n presentationFormat: GPUTextureFormat,\n depthTest = false,\n topology: GPUPrimitiveTopology = 'triangle-list',\n cullMode: GPUCullMode = 'back'\n) => {\n const pipelineDescriptor: GPURenderPipelineDescriptor = {\n label: `${label}.pipeline`,\n layout: device.createPipelineLayout({\n label: `${label}.pipelineLayout`,\n bindGroupLayouts: bgLayouts,\n }),\n vertex: {\n module: device.createShaderModule({\n label: `${label}.vertexShader`,\n code: vertexShader,\n }),\n entryPoint: 'vertexMain',\n buffers:\n vBufferFormats.length !== 0 ? 
[createVBuffer(vBufferFormats)] : [],\n },\n fragment: {\n module: device.createShaderModule({\n label: `${label}.fragmentShader`,\n code: fragmentShader,\n }),\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: topology,\n cullMode: cullMode,\n },\n };\n if (depthTest) {\n pipelineDescriptor.depthStencil = {\n depthCompare: 'less',\n depthWriteEnabled: true,\n format: 'depth24plus',\n };\n }\n return device.createRenderPipeline(pipelineDescriptor);\n};\n\nexport const createTextureFromImage = (\n device: GPUDevice,\n bitmap: ImageBitmap\n) => {\n const texture: GPUTexture = device.createTexture({\n size: [bitmap.width, bitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: bitmap },\n { texture: texture },\n [bitmap.width, bitmap.height]\n );\n return texture;\n};\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/167.0cb2923e32255961.js b/_next/static/chunks/167.0cb2923e32255961.js deleted file mode 100644 index e41a32f5..00000000 --- a/_next/static/chunks/167.0cb2923e32255961.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[167],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),u=t(9147),l=t.n(u);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),f=(0,o.useRouter)(),m=f.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[v,g]=(0,s.useState)(null),[h,b]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?b(m[1]):b(a[0].name),c&&u.current)for(u.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on 
Github!"}),(0,r.jsx)("p",{children:e.description}),v?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(v)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":h==e.name,onClick(){b(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":h==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function p(e,n){if(!e)throw Error(n)}},6167:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return c}});var r=t(6416),a=t(5671),i="////////////////////////////////////////////////////////////////////////////////\n// Utilities\n////////////////////////////////////////////////////////////////////////////////\nvar rand_seed : vec2;\n\nfn init_rand(invocation_id : u32, seed : vec4) {\n rand_seed = seed.xz;\n rand_seed = fract(rand_seed * cos(35.456+f32(invocation_id) * seed.yw));\n rand_seed = fract(rand_seed * cos(41.235+f32(invocation_id) * seed.xw));\n}\n\nfn rand() -> f32 {\n rand_seed.x = fract(cos(dot(rand_seed, vec2(23.14077926, 232.61690225))) * 136.8168);\n rand_seed.y = fract(cos(dot(rand_seed, vec2(54.47856553, 345.84153136))) * 534.7645);\n return rand_seed.y;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Vertex shader\n////////////////////////////////////////////////////////////////////////////////\nstruct RenderParams {\n modelViewProjectionMatrix : mat4x4,\n right : vec3,\n up : vec3\n}\n@binding(0) @group(0) var render_params : RenderParams;\n\nstruct VertexInput {\n @location(0) position : vec3,\n @location(1) color : vec4,\n @location(2) quad_pos : vec2, // -1..+1\n}\n\nstruct VertexOutput {\n @builtin(position) position : vec4,\n @location(0) color : vec4,\n @location(1) quad_pos : vec2, // -1..+1\n}\n\n@vertex\nfn vs_main(in : VertexInput) -> VertexOutput {\n var quad_pos = mat2x3(render_params.right, render_params.up) * in.quad_pos;\n var position = in.position + quad_pos * 0.01;\n var out : VertexOutput;\n out.position = render_params.modelViewProjectionMatrix * vec4(position, 1.0);\n out.color = in.color;\n out.quad_pos = in.quad_pos;\n return out;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Fragment shader\n////////////////////////////////////////////////////////////////////////////////\n@fragment\nfn fs_main(in : VertexOutput) -> @location(0) vec4 {\n var color = in.color;\n // Apply a circular particle alpha mask\n color.a = color.a * max(1.0 - length(in.quad_pos), 0.0);\n return color;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Simulation Compute shader\n////////////////////////////////////////////////////////////////////////////////\nstruct SimulationParams {\n deltaTime : f32,\n seed : vec4,\n}\n\nstruct Particle {\n position : vec3,\n lifetime : f32,\n color : vec4,\n velocity : vec3,\n}\n\nstruct Particles {\n particles : array,\n}\n\n@binding(0) @group(0) var sim_params : SimulationParams;\n@binding(1) @group(0) var data : Particles;\n@binding(2) @group(0) var texture : texture_2d;\n\n@compute 
@workgroup_size(64)\nfn simulate(@builtin(global_invocation_id) global_invocation_id : vec3) {\n let idx = global_invocation_id.x;\n\n init_rand(idx, sim_params.seed);\n\n var particle = data.particles[idx];\n\n // Apply gravity\n particle.velocity.z = particle.velocity.z - sim_params.deltaTime * 0.5;\n\n // Basic velocity integration\n particle.position = particle.position + sim_params.deltaTime * particle.velocity;\n\n // Age each particle. Fade out before vanishing.\n particle.lifetime = particle.lifetime - sim_params.deltaTime;\n particle.color.a = smoothstep(0.0, 0.5, particle.lifetime);\n\n // If the lifetime has gone negative, then the particle is dead and should be\n // respawned.\n if (particle.lifetime < 0.0) {\n // Use the probability map to find where the particle should be spawned.\n // Starting with the 1x1 mip level.\n var coord : vec2;\n for (var level = u32(textureNumLevels(texture) - 1); level > 0; level--) {\n // Load the probability value from the mip-level\n // Generate a random number and using the probabilty values, pick the\n // next texel in the next largest mip level:\n //\n // 0.0 probabilites.r probabilites.g probabilites.b 1.0\n // | | | | |\n // | TOP-LEFT | TOP-RIGHT | BOTTOM-LEFT | BOTTOM_RIGHT |\n //\n let probabilites = textureLoad(texture, coord, level);\n let value = vec4(rand());\n let mask = (value >= vec4(0.0, probabilites.xyz)) & (value < probabilites);\n coord = coord * 2;\n coord.x = coord.x + select(0, 1, any(mask.yw)); // x y\n coord.y = coord.y + select(0, 1, any(mask.zw)); // z w\n }\n let uv = vec2(coord) / vec2(textureDimensions(texture));\n particle.position = vec3((uv - 0.5) * 3.0 * vec2(1.0, -1.0), 0.0);\n particle.color = textureLoad(texture, coord, 0);\n particle.velocity.x = (rand() - 0.5) * 0.1;\n particle.velocity.y = (rand() - 0.5) * 0.1;\n particle.velocity.z = rand() * 0.3;\n particle.lifetime = 0.5 + rand() * 3.0;\n }\n\n // Store the new particle value\n data.particles[idx] = particle;\n}\n",o="struct UBO {\n width : u32,\n}\n\nstruct Buffer {\n weights : array,\n}\n\n@binding(0) @group(0) var ubo : UBO;\n@binding(1) @group(0) var buf_in : Buffer;\n@binding(2) @group(0) var buf_out : Buffer;\n@binding(3) @group(0) var tex_in : texture_2d;\n@binding(3) @group(0) var tex_out : texture_storage_2d;\n\n\n////////////////////////////////////////////////////////////////////////////////\n// import_level\n//\n// Loads the alpha channel from a texel of the source image, and writes it to\n// the buf_out.weights.\n////////////////////////////////////////////////////////////////////////////////\n@compute @workgroup_size(64)\nfn import_level(@builtin(global_invocation_id) coord : vec3) {\n _ = &buf_in;\n let offset = coord.x + coord.y * ubo.width;\n buf_out.weights[offset] = textureLoad(tex_in, vec2(coord.xy), 0).w;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// export_level\n//\n// Loads 4 f32 weight values from buf_in.weights, and stores summed value into\n// buf_out.weights, along with the calculated 'probabilty' vec4 values into the\n// mip level of tex_out. 
See simulate() in particle.wgsl to understand the\n// probability logic.\n////////////////////////////////////////////////////////////////////////////////\n@compute @workgroup_size(64)\nfn export_level(@builtin(global_invocation_id) coord : vec3) {\n if (all(coord.xy < vec2(textureDimensions(tex_out)))) {\n let dst_offset = coord.x + coord.y * ubo.width;\n let src_offset = coord.x*2u + coord.y*2u * ubo.width;\n\n let a = buf_in.weights[src_offset + 0u];\n let b = buf_in.weights[src_offset + 1u];\n let c = buf_in.weights[src_offset + 0u + ubo.width];\n let d = buf_in.weights[src_offset + 1u + ubo.width];\n let sum = dot(vec4(a, b, c, d), vec4(1.0));\n\n buf_out.weights[dst_offset] = sum / 4.0;\n\n let probabilities = vec4(a, a+b, a+b+c, sum) / max(sum, 0.0001);\n textureStore(tex_out, vec2(coord.xy), probabilities);\n }\n}\n",s="src/sample/particles/main.ts";let u=async e=>{let n,{canvas:t,pageState:a,gui:s}=e,u=await navigator.gpu.requestAdapter(),l=await u.requestDevice();if(!a.active)return;let c=t.getContext("webgpu"),d=window.devicePixelRatio;t.width=t.clientWidth*d,t.height=t.clientHeight*d;let p=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:p,alphaMode:"premultiplied"});let f=l.createBuffer({size:24e5,usage:GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE}),m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i}),entryPoint:"vs_main",buffers:[{arrayStride:48,stepMode:"instance",attributes:[{shaderLocation:0,offset:0,format:"float32x3"},{shaderLocation:1,offset:16,format:"float32x4"}]},{arrayStride:8,stepMode:"vertex",attributes:[{shaderLocation:2,offset:0,format:"float32x2"}]}]},fragment:{module:l.createShaderModule({code:i}),entryPoint:"fs_main",targets:[{format:p,blend:{color:{srcFactor:"src-alpha",dstFactor:"one",operation:"add"},alpha:{srcFactor:"zero",dstFactor:"one",operation:"add"}}}]},primitive:{topology:"triangle-list"},depthStencil:{depthWriteEnabled:!1,depthCompare:"less",format:"depth24plus"}}),v=l.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),g=l.createBuffer({size:96,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),h=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:g}}]}),b={colorAttachments:[{view:void 0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:v.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},x=l.createBuffer({size:48,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(x.getMappedRange()).set([-1,-1,1,-1,-1,1,-1,1,1,-1,1,1]),x.unmap();let _=1,w=1,P=1;{let y=await fetch("/assets/img/webgpu.png"),B=await createImageBitmap(await y.blob());for(;_>C,L=w>>C,R=0==C?E.getBindGroupLayout(0):U.getBindGroupLayout(0),z=l.createBindGroup({layout:R,entries:[{binding:0,resource:{buffer:G}},{binding:1,resource:{buffer:1&C?T:M}},{binding:2,resource:{buffer:1&C?M:T}},{binding:3,resource:n.createView({format:"rgba8unorm",dimension:"2d",baseMipLevel:C,mipLevelCount:1})}]});if(0==C){let A=S.beginComputePass();A.setPipeline(E),A.setBindGroup(0,z),A.dispatchWorkgroups(Math.ceil(O/64),L),A.end()}else{let F=S.beginComputePass();F.setPipeline(U),F.setBindGroup(0,z),F.dispatchWorkgroups(Math.ceil(O/64),L),F.end()}}l.queue.submit([S.finish()])}let I={simulate:!0,deltaTime:.04},V=l.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});Object.keys(I).forEach(e=>{s.add(I,e)});let 
q=l.createComputePipeline({layout:"auto",compute:{module:l.createShaderModule({code:i}),entryPoint:"simulate"}}),j=l.createBindGroup({layout:q.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:V}},{binding:1,resource:{buffer:f,offset:0,size:24e5}},{binding:2,resource:n.createView()}]}),N=t.width/t.height,W=r._E.perspective(2*Math.PI/5,N,1,100),k=r._E.create(),D=r._E.create();requestAnimationFrame(function e(){if(!a.active)return;l.queue.writeBuffer(V,0,new Float32Array([I.simulate?I.deltaTime:0,0,0,0,100*Math.random(),100*Math.random(),1+Math.random(),1+Math.random()])),r._E.identity(k),r._E.translate(k,r.R3.fromValues(0,0,-3),k),r._E.rotateX(k,-.2*Math.PI,k),r._E.multiply(W,k,D),l.queue.writeBuffer(g,0,new Float32Array([D[0],D[1],D[2],D[3],D[4],D[5],D[6],D[7],D[8],D[9],D[10],D[11],D[12],D[13],D[14],D[15],k[0],k[4],k[8],0,k[1],k[5],k[9],0]));let n=c.getCurrentTexture();b.colorAttachments[0].view=n.createView();let t=l.createCommandEncoder();{let i=t.beginComputePass();i.setPipeline(q),i.setBindGroup(0,j),i.dispatchWorkgroups(Math.ceil(781.25)),i.end()}{let o=t.beginRenderPass(b);o.setPipeline(m),o.setBindGroup(0,h),o.setVertexBuffer(0,f),o.setVertexBuffer(1,x),o.draw(6,5e4,0,0),o.end()}l.queue.submit([t.finish()]),requestAnimationFrame(e)})},l=()=>(0,a.Tl)({name:"Particles",description:"This example demonstrates rendering of particles simulated with compute shaders.",gui:!0,init:u,sources:[{name:s.substring(21),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport particleWGSL from './particle.wgsl';\nimport probabilityMapWGSL from './probabilityMap.wgsl';\n\nconst numParticles = 50000;\nconst particlePositionOffset = 0;\nconst particleColorOffset = 4 * 4;\nconst particleInstanceByteSize =\n 3 * 4 + // position\n 1 * 4 + // lifetime\n 4 * 4 + // color\n 3 * 4 + // velocity\n 1 * 4 + // padding\n 0;\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const particlesBuffer = device.createBuffer({\n size: numParticles * particleInstanceByteSize,\n usage: GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE,\n });\n\n const renderPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'vs_main',\n buffers: [\n {\n // instanced particles buffer\n arrayStride: particleInstanceByteSize,\n stepMode: 'instance',\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: particlePositionOffset,\n format: 'float32x3',\n },\n {\n // color\n shaderLocation: 1,\n offset: particleColorOffset,\n format: 'float32x4',\n },\n ],\n },\n {\n // quad vertex buffer\n arrayStride: 2 * 4, // vec2\n stepMode: 'vertex',\n attributes: [\n {\n // vertex positions\n shaderLocation: 2,\n offset: 0,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'fs_main',\n targets: [\n {\n format: 
presentationFormat,\n blend: {\n color: {\n srcFactor: 'src-alpha',\n dstFactor: 'one',\n operation: 'add',\n },\n alpha: {\n srcFactor: 'zero',\n dstFactor: 'one',\n operation: 'add',\n },\n },\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n\n depthStencil: {\n depthWriteEnabled: false,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize =\n 4 * 4 * 4 + // modelViewProjectionMatrix : mat4x4\n 3 * 4 + // right : vec3\n 4 + // padding\n 3 * 4 + // up : vec3\n 4 + // padding\n 0;\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: renderPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n //////////////////////////////////////////////////////////////////////////////\n // Quad vertex buffer\n //////////////////////////////////////////////////////////////////////////////\n const quadVertexBuffer = device.createBuffer({\n size: 6 * 2 * 4, // 6x vec2\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n // prettier-ignore\n const vertexData = [\n -1.0, -1.0, +1.0, -1.0, -1.0, +1.0, -1.0, +1.0, +1.0, -1.0, +1.0, +1.0,\n ];\n new Float32Array(quadVertexBuffer.getMappedRange()).set(vertexData);\n quadVertexBuffer.unmap();\n\n //////////////////////////////////////////////////////////////////////////////\n // Texture\n //////////////////////////////////////////////////////////////////////////////\n let texture: GPUTexture;\n let textureWidth = 1;\n let textureHeight = 1;\n let numMipLevels = 1;\n {\n const response = await fetch('/assets/img/webgpu.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n // Calculate number of mip levels required to generate the probability map\n while (\n textureWidth < imageBitmap.width ||\n textureHeight < imageBitmap.height\n ) {\n textureWidth *= 2;\n textureHeight *= 2;\n numMipLevels++;\n }\n texture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n mipLevelCount: numMipLevels,\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: texture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n //////////////////////////////////////////////////////////////////////////////\n // Probability map generation\n // The 0'th mip level of texture holds the color data and spawn-probability in\n // the alpha channel. 
The mip levels 1..N are generated to hold spawn\n // probabilities up to the top 1x1 mip level.\n //////////////////////////////////////////////////////////////////////////////\n {\n const probabilityMapImportLevelPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({ code: probabilityMapWGSL }),\n entryPoint: 'import_level',\n },\n });\n const probabilityMapExportLevelPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({ code: probabilityMapWGSL }),\n entryPoint: 'export_level',\n },\n });\n\n const probabilityMapUBOBufferSize =\n 1 * 4 + // stride\n 3 * 4 + // padding\n 0;\n const probabilityMapUBOBuffer = device.createBuffer({\n size: probabilityMapUBOBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const buffer_a = device.createBuffer({\n size: textureWidth * textureHeight * 4,\n usage: GPUBufferUsage.STORAGE,\n });\n const buffer_b = device.createBuffer({\n size: textureWidth * textureHeight * 4,\n usage: GPUBufferUsage.STORAGE,\n });\n device.queue.writeBuffer(\n probabilityMapUBOBuffer,\n 0,\n new Int32Array([textureWidth])\n );\n const commandEncoder = device.createCommandEncoder();\n for (let level = 0; level < numMipLevels; level++) {\n const levelWidth = textureWidth >> level;\n const levelHeight = textureHeight >> level;\n const pipeline =\n level == 0\n ? probabilityMapImportLevelPipeline.getBindGroupLayout(0)\n : probabilityMapExportLevelPipeline.getBindGroupLayout(0);\n const probabilityMapBindGroup = device.createBindGroup({\n layout: pipeline,\n entries: [\n {\n // ubo\n binding: 0,\n resource: { buffer: probabilityMapUBOBuffer },\n },\n {\n // buf_in\n binding: 1,\n resource: { buffer: level & 1 ? buffer_a : buffer_b },\n },\n {\n // buf_out\n binding: 2,\n resource: { buffer: level & 1 ? 
buffer_b : buffer_a },\n },\n {\n // tex_in / tex_out\n binding: 3,\n resource: texture.createView({\n format: 'rgba8unorm',\n dimension: '2d',\n baseMipLevel: level,\n mipLevelCount: 1,\n }),\n },\n ],\n });\n if (level == 0) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(probabilityMapImportLevelPipeline);\n passEncoder.setBindGroup(0, probabilityMapBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);\n passEncoder.end();\n } else {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(probabilityMapExportLevelPipeline);\n passEncoder.setBindGroup(0, probabilityMapBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);\n passEncoder.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n }\n\n //////////////////////////////////////////////////////////////////////////////\n // Simulation compute pipeline\n //////////////////////////////////////////////////////////////////////////////\n const simulationParams = {\n simulate: true,\n deltaTime: 0.04,\n };\n\n const simulationUBOBufferSize =\n 1 * 4 + // deltaTime\n 3 * 4 + // padding\n 4 * 4 + // seed\n 0;\n const simulationUBOBuffer = device.createBuffer({\n size: simulationUBOBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n Object.keys(simulationParams).forEach((k) => {\n gui.add(simulationParams, k);\n });\n\n const computePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'simulate',\n },\n });\n const computeBindGroup = device.createBindGroup({\n layout: computePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: simulationUBOBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: particlesBuffer,\n offset: 0,\n size: numParticles * particleInstanceByteSize,\n },\n },\n {\n binding: 2,\n resource: texture.createView(),\n },\n ],\n });\n\n const aspect = canvas.width / canvas.height;\n const projection = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 100.0);\n const view = mat4.create();\n const mvp = mat4.create();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n device.queue.writeBuffer(\n simulationUBOBuffer,\n 0,\n new Float32Array([\n simulationParams.simulate ? 
simulationParams.deltaTime : 0.0,\n 0.0,\n 0.0,\n 0.0, // padding\n Math.random() * 100,\n Math.random() * 100, // seed.xy\n 1 + Math.random(),\n 1 + Math.random(), // seed.zw\n ])\n );\n\n mat4.identity(view);\n mat4.translate(view, vec3.fromValues(0, 0, -3), view);\n mat4.rotateX(view, Math.PI * -0.2, view);\n mat4.multiply(projection, view, mvp);\n\n // prettier-ignore\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n new Float32Array([\n // modelViewProjectionMatrix\n mvp[0], mvp[1], mvp[2], mvp[3],\n mvp[4], mvp[5], mvp[6], mvp[7],\n mvp[8], mvp[9], mvp[10], mvp[11],\n mvp[12], mvp[13], mvp[14], mvp[15],\n\n view[0], view[4], view[8], // right\n\n 0, // padding\n\n view[1], view[5], view[9], // up\n\n 0, // padding\n ])\n );\n const swapChainTexture = context.getCurrentTexture();\n // prettier-ignore\n renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();\n\n const commandEncoder = device.createCommandEncoder();\n {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(computePipeline);\n passEncoder.setBindGroup(0, computeBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(numParticles / 64));\n passEncoder.end();\n }\n {\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(renderPipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, particlesBuffer);\n passEncoder.setVertexBuffer(1, quadVertexBuffer);\n passEncoder.draw(6, numParticles, 0, 0);\n passEncoder.end();\n }\n\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst Particles: () => JSX.Element = () =>\n makeSample({\n name: 'Particles',\n description:\n 'This example demonstrates rendering of particles simulated with compute shaders.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './particle.wgsl',\n contents: particleWGSL,\n editable: true,\n },\n {\n name: './probabilityMap.wgsl',\n contents: probabilityMapWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default Particles;\n"},{name:"./particle.wgsl",contents:i,editable:!0},{name:"./probabilityMap.wgsl",contents:o,editable:!0}],filename:s});var c=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/167.ad599134368bd94d.js b/_next/static/chunks/167.ad599134368bd94d.js new file mode 100644 index 00000000..c82e38b5 --- /dev/null +++ b/_next/static/chunks/167.ad599134368bd94d.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[167],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),u=t(9147),l=t.n(u);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new 
n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),f=(0,o.useRouter)(),m=f.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[v,g]=(0,s.useState)(null),[h,b]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(m?b(m[1]):b(a[0].name),c&&u.current)for(u.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),v?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(v)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":h==e.name,onClick(){b(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":h==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function p(e,n){if(!e)throw Error(n)}},6167:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return c}});var r=t(6416),a=t(5671),i="////////////////////////////////////////////////////////////////////////////////\n// Utilities\n////////////////////////////////////////////////////////////////////////////////\nvar rand_seed : vec2;\n\nfn init_rand(invocation_id : u32, seed : vec4) {\n rand_seed = seed.xz;\n rand_seed = fract(rand_seed * cos(35.456+f32(invocation_id) * seed.yw));\n rand_seed = fract(rand_seed * cos(41.235+f32(invocation_id) * seed.xw));\n}\n\nfn rand() -> f32 {\n rand_seed.x = fract(cos(dot(rand_seed, vec2(23.14077926, 232.61690225))) * 136.8168);\n rand_seed.y = fract(cos(dot(rand_seed, vec2(54.47856553, 345.84153136))) * 534.7645);\n return rand_seed.y;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Vertex shader\n////////////////////////////////////////////////////////////////////////////////\nstruct RenderParams {\n modelViewProjectionMatrix : mat4x4,\n right : vec3,\n up : vec3\n}\n@binding(0) @group(0) var render_params : RenderParams;\n\nstruct VertexInput {\n @location(0) position : vec3,\n @location(1) color : vec4,\n @location(2) quad_pos : vec2, // -1..+1\n}\n\nstruct VertexOutput {\n 
@builtin(position) position : vec4,\n @location(0) color : vec4,\n @location(1) quad_pos : vec2, // -1..+1\n}\n\n@vertex\nfn vs_main(in : VertexInput) -> VertexOutput {\n var quad_pos = mat2x3(render_params.right, render_params.up) * in.quad_pos;\n var position = in.position + quad_pos * 0.01;\n var out : VertexOutput;\n out.position = render_params.modelViewProjectionMatrix * vec4(position, 1.0);\n out.color = in.color;\n out.quad_pos = in.quad_pos;\n return out;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Fragment shader\n////////////////////////////////////////////////////////////////////////////////\n@fragment\nfn fs_main(in : VertexOutput) -> @location(0) vec4 {\n var color = in.color;\n // Apply a circular particle alpha mask\n color.a = color.a * max(1.0 - length(in.quad_pos), 0.0);\n return color;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Simulation Compute shader\n////////////////////////////////////////////////////////////////////////////////\nstruct SimulationParams {\n deltaTime : f32,\n seed : vec4,\n}\n\nstruct Particle {\n position : vec3,\n lifetime : f32,\n color : vec4,\n velocity : vec3,\n}\n\nstruct Particles {\n particles : array,\n}\n\n@binding(0) @group(0) var sim_params : SimulationParams;\n@binding(1) @group(0) var data : Particles;\n@binding(2) @group(0) var texture : texture_2d;\n\n@compute @workgroup_size(64)\nfn simulate(@builtin(global_invocation_id) global_invocation_id : vec3) {\n let idx = global_invocation_id.x;\n\n init_rand(idx, sim_params.seed);\n\n var particle = data.particles[idx];\n\n // Apply gravity\n particle.velocity.z = particle.velocity.z - sim_params.deltaTime * 0.5;\n\n // Basic velocity integration\n particle.position = particle.position + sim_params.deltaTime * particle.velocity;\n\n // Age each particle. 
Fade out before vanishing.\n particle.lifetime = particle.lifetime - sim_params.deltaTime;\n particle.color.a = smoothstep(0.0, 0.5, particle.lifetime);\n\n // If the lifetime has gone negative, then the particle is dead and should be\n // respawned.\n if (particle.lifetime < 0.0) {\n // Use the probability map to find where the particle should be spawned.\n // Starting with the 1x1 mip level.\n var coord : vec2;\n for (var level = u32(textureNumLevels(texture) - 1); level > 0; level--) {\n // Load the probability value from the mip-level\n // Generate a random number and using the probabilty values, pick the\n // next texel in the next largest mip level:\n //\n // 0.0 probabilites.r probabilites.g probabilites.b 1.0\n // | | | | |\n // | TOP-LEFT | TOP-RIGHT | BOTTOM-LEFT | BOTTOM_RIGHT |\n //\n let probabilites = textureLoad(texture, coord, level);\n let value = vec4(rand());\n let mask = (value >= vec4(0.0, probabilites.xyz)) & (value < probabilites);\n coord = coord * 2;\n coord.x = coord.x + select(0, 1, any(mask.yw)); // x y\n coord.y = coord.y + select(0, 1, any(mask.zw)); // z w\n }\n let uv = vec2(coord) / vec2(textureDimensions(texture));\n particle.position = vec3((uv - 0.5) * 3.0 * vec2(1.0, -1.0), 0.0);\n particle.color = textureLoad(texture, coord, 0);\n particle.velocity.x = (rand() - 0.5) * 0.1;\n particle.velocity.y = (rand() - 0.5) * 0.1;\n particle.velocity.z = rand() * 0.3;\n particle.lifetime = 0.5 + rand() * 3.0;\n }\n\n // Store the new particle value\n data.particles[idx] = particle;\n}\n",o="struct UBO {\n width : u32,\n}\n\nstruct Buffer {\n weights : array,\n}\n\n@binding(0) @group(0) var ubo : UBO;\n@binding(1) @group(0) var buf_in : Buffer;\n@binding(2) @group(0) var buf_out : Buffer;\n@binding(3) @group(0) var tex_in : texture_2d;\n@binding(3) @group(0) var tex_out : texture_storage_2d;\n\n\n////////////////////////////////////////////////////////////////////////////////\n// import_level\n//\n// Loads the alpha channel from a texel of the source image, and writes it to\n// the buf_out.weights.\n////////////////////////////////////////////////////////////////////////////////\n@compute @workgroup_size(64)\nfn import_level(@builtin(global_invocation_id) coord : vec3) {\n _ = &buf_in;\n let offset = coord.x + coord.y * ubo.width;\n buf_out.weights[offset] = textureLoad(tex_in, vec2(coord.xy), 0).w;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// export_level\n//\n// Loads 4 f32 weight values from buf_in.weights, and stores summed value into\n// buf_out.weights, along with the calculated 'probabilty' vec4 values into the\n// mip level of tex_out. 
See simulate() in particle.wgsl to understand the\n// probability logic.\n////////////////////////////////////////////////////////////////////////////////\n@compute @workgroup_size(64)\nfn export_level(@builtin(global_invocation_id) coord : vec3) {\n if (all(coord.xy < vec2(textureDimensions(tex_out)))) {\n let dst_offset = coord.x + coord.y * ubo.width;\n let src_offset = coord.x*2u + coord.y*2u * ubo.width;\n\n let a = buf_in.weights[src_offset + 0u];\n let b = buf_in.weights[src_offset + 1u];\n let c = buf_in.weights[src_offset + 0u + ubo.width];\n let d = buf_in.weights[src_offset + 1u + ubo.width];\n let sum = dot(vec4(a, b, c, d), vec4(1.0));\n\n buf_out.weights[dst_offset] = sum / 4.0;\n\n let probabilities = vec4(a, a+b, a+b+c, sum) / max(sum, 0.0001);\n textureStore(tex_out, vec2(coord.xy), probabilities);\n }\n}\n",s="src/sample/particles/main.ts";let u=async e=>{let n,{canvas:t,pageState:a,gui:s}=e,u=await navigator.gpu.requestAdapter(),l=await u.requestDevice();if(!a.active)return;let c=t.getContext("webgpu"),d=window.devicePixelRatio;t.width=t.clientWidth*d,t.height=t.clientHeight*d;let p=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:p,alphaMode:"premultiplied"});let f=l.createBuffer({size:24e5,usage:GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE}),m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i}),entryPoint:"vs_main",buffers:[{arrayStride:48,stepMode:"instance",attributes:[{shaderLocation:0,offset:0,format:"float32x3"},{shaderLocation:1,offset:16,format:"float32x4"}]},{arrayStride:8,stepMode:"vertex",attributes:[{shaderLocation:2,offset:0,format:"float32x2"}]}]},fragment:{module:l.createShaderModule({code:i}),entryPoint:"fs_main",targets:[{format:p,blend:{color:{srcFactor:"src-alpha",dstFactor:"one",operation:"add"},alpha:{srcFactor:"zero",dstFactor:"one",operation:"add"}}}]},primitive:{topology:"triangle-list"},depthStencil:{depthWriteEnabled:!1,depthCompare:"less",format:"depth24plus"}}),v=l.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),g=l.createBuffer({size:96,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),h=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:g}}]}),b={colorAttachments:[{view:void 0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:v.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},x=l.createBuffer({size:48,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(x.getMappedRange()).set([-1,-1,1,-1,-1,1,-1,1,1,-1,1,1]),x.unmap();let _=1,w=1,P=1;{let y=await fetch("../assets/img/webgpu.png"),B=await createImageBitmap(await y.blob());for(;_>C,L=w>>C,R=0==C?E.getBindGroupLayout(0):U.getBindGroupLayout(0),z=l.createBindGroup({layout:R,entries:[{binding:0,resource:{buffer:G}},{binding:1,resource:{buffer:1&C?T:M}},{binding:2,resource:{buffer:1&C?M:T}},{binding:3,resource:n.createView({format:"rgba8unorm",dimension:"2d",baseMipLevel:C,mipLevelCount:1})}]});if(0==C){let A=S.beginComputePass();A.setPipeline(E),A.setBindGroup(0,z),A.dispatchWorkgroups(Math.ceil(O/64),L),A.end()}else{let F=S.beginComputePass();F.setPipeline(U),F.setBindGroup(0,z),F.dispatchWorkgroups(Math.ceil(O/64),L),F.end()}}l.queue.submit([S.finish()])}let I={simulate:!0,deltaTime:.04},V=l.createBuffer({size:32,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});Object.keys(I).forEach(e=>{s.add(I,e)});let 
q=l.createComputePipeline({layout:"auto",compute:{module:l.createShaderModule({code:i}),entryPoint:"simulate"}}),j=l.createBindGroup({layout:q.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:V}},{binding:1,resource:{buffer:f,offset:0,size:24e5}},{binding:2,resource:n.createView()}]}),N=t.width/t.height,W=r._E.perspective(2*Math.PI/5,N,1,100),k=r._E.create(),D=r._E.create();requestAnimationFrame(function e(){if(!a.active)return;l.queue.writeBuffer(V,0,new Float32Array([I.simulate?I.deltaTime:0,0,0,0,100*Math.random(),100*Math.random(),1+Math.random(),1+Math.random()])),r._E.identity(k),r._E.translate(k,r.R3.fromValues(0,0,-3),k),r._E.rotateX(k,-.2*Math.PI,k),r._E.multiply(W,k,D),l.queue.writeBuffer(g,0,new Float32Array([D[0],D[1],D[2],D[3],D[4],D[5],D[6],D[7],D[8],D[9],D[10],D[11],D[12],D[13],D[14],D[15],k[0],k[4],k[8],0,k[1],k[5],k[9],0]));let n=c.getCurrentTexture();b.colorAttachments[0].view=n.createView();let t=l.createCommandEncoder();{let i=t.beginComputePass();i.setPipeline(q),i.setBindGroup(0,j),i.dispatchWorkgroups(Math.ceil(781.25)),i.end()}{let o=t.beginRenderPass(b);o.setPipeline(m),o.setBindGroup(0,h),o.setVertexBuffer(0,f),o.setVertexBuffer(1,x),o.draw(6,5e4,0,0),o.end()}l.queue.submit([t.finish()]),requestAnimationFrame(e)})},l=()=>(0,a.Tl)({name:"Particles",description:"This example demonstrates rendering of particles simulated with compute shaders.",gui:!0,init:u,sources:[{name:s.substring(21),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport particleWGSL from './particle.wgsl';\nimport probabilityMapWGSL from './probabilityMap.wgsl';\n\nconst numParticles = 50000;\nconst particlePositionOffset = 0;\nconst particleColorOffset = 4 * 4;\nconst particleInstanceByteSize =\n 3 * 4 + // position\n 1 * 4 + // lifetime\n 4 * 4 + // color\n 3 * 4 + // velocity\n 1 * 4 + // padding\n 0;\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const particlesBuffer = device.createBuffer({\n size: numParticles * particleInstanceByteSize,\n usage: GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE,\n });\n\n const renderPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'vs_main',\n buffers: [\n {\n // instanced particles buffer\n arrayStride: particleInstanceByteSize,\n stepMode: 'instance',\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: particlePositionOffset,\n format: 'float32x3',\n },\n {\n // color\n shaderLocation: 1,\n offset: particleColorOffset,\n format: 'float32x4',\n },\n ],\n },\n {\n // quad vertex buffer\n arrayStride: 2 * 4, // vec2\n stepMode: 'vertex',\n attributes: [\n {\n // vertex positions\n shaderLocation: 2,\n offset: 0,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'fs_main',\n targets: [\n {\n format: 
presentationFormat,\n blend: {\n color: {\n srcFactor: 'src-alpha',\n dstFactor: 'one',\n operation: 'add',\n },\n alpha: {\n srcFactor: 'zero',\n dstFactor: 'one',\n operation: 'add',\n },\n },\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n\n depthStencil: {\n depthWriteEnabled: false,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize =\n 4 * 4 * 4 + // modelViewProjectionMatrix : mat4x4\n 3 * 4 + // right : vec3\n 4 + // padding\n 3 * 4 + // up : vec3\n 4 + // padding\n 0;\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: renderPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n //////////////////////////////////////////////////////////////////////////////\n // Quad vertex buffer\n //////////////////////////////////////////////////////////////////////////////\n const quadVertexBuffer = device.createBuffer({\n size: 6 * 2 * 4, // 6x vec2\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n // prettier-ignore\n const vertexData = [\n -1.0, -1.0, +1.0, -1.0, -1.0, +1.0, -1.0, +1.0, +1.0, -1.0, +1.0, +1.0,\n ];\n new Float32Array(quadVertexBuffer.getMappedRange()).set(vertexData);\n quadVertexBuffer.unmap();\n\n //////////////////////////////////////////////////////////////////////////////\n // Texture\n //////////////////////////////////////////////////////////////////////////////\n let texture: GPUTexture;\n let textureWidth = 1;\n let textureHeight = 1;\n let numMipLevels = 1;\n {\n const response = await fetch('../assets/img/webgpu.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n // Calculate number of mip levels required to generate the probability map\n while (\n textureWidth < imageBitmap.width ||\n textureHeight < imageBitmap.height\n ) {\n textureWidth *= 2;\n textureHeight *= 2;\n numMipLevels++;\n }\n texture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n mipLevelCount: numMipLevels,\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: texture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n //////////////////////////////////////////////////////////////////////////////\n // Probability map generation\n // The 0'th mip level of texture holds the color data and spawn-probability in\n // the alpha channel. 
The mip levels 1..N are generated to hold spawn\n // probabilities up to the top 1x1 mip level.\n //////////////////////////////////////////////////////////////////////////////\n {\n const probabilityMapImportLevelPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({ code: probabilityMapWGSL }),\n entryPoint: 'import_level',\n },\n });\n const probabilityMapExportLevelPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({ code: probabilityMapWGSL }),\n entryPoint: 'export_level',\n },\n });\n\n const probabilityMapUBOBufferSize =\n 1 * 4 + // stride\n 3 * 4 + // padding\n 0;\n const probabilityMapUBOBuffer = device.createBuffer({\n size: probabilityMapUBOBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n const buffer_a = device.createBuffer({\n size: textureWidth * textureHeight * 4,\n usage: GPUBufferUsage.STORAGE,\n });\n const buffer_b = device.createBuffer({\n size: textureWidth * textureHeight * 4,\n usage: GPUBufferUsage.STORAGE,\n });\n device.queue.writeBuffer(\n probabilityMapUBOBuffer,\n 0,\n new Int32Array([textureWidth])\n );\n const commandEncoder = device.createCommandEncoder();\n for (let level = 0; level < numMipLevels; level++) {\n const levelWidth = textureWidth >> level;\n const levelHeight = textureHeight >> level;\n const pipeline =\n level == 0\n ? probabilityMapImportLevelPipeline.getBindGroupLayout(0)\n : probabilityMapExportLevelPipeline.getBindGroupLayout(0);\n const probabilityMapBindGroup = device.createBindGroup({\n layout: pipeline,\n entries: [\n {\n // ubo\n binding: 0,\n resource: { buffer: probabilityMapUBOBuffer },\n },\n {\n // buf_in\n binding: 1,\n resource: { buffer: level & 1 ? buffer_a : buffer_b },\n },\n {\n // buf_out\n binding: 2,\n resource: { buffer: level & 1 ? 
buffer_b : buffer_a },\n },\n {\n // tex_in / tex_out\n binding: 3,\n resource: texture.createView({\n format: 'rgba8unorm',\n dimension: '2d',\n baseMipLevel: level,\n mipLevelCount: 1,\n }),\n },\n ],\n });\n if (level == 0) {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(probabilityMapImportLevelPipeline);\n passEncoder.setBindGroup(0, probabilityMapBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);\n passEncoder.end();\n } else {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(probabilityMapExportLevelPipeline);\n passEncoder.setBindGroup(0, probabilityMapBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(levelWidth / 64), levelHeight);\n passEncoder.end();\n }\n }\n device.queue.submit([commandEncoder.finish()]);\n }\n\n //////////////////////////////////////////////////////////////////////////////\n // Simulation compute pipeline\n //////////////////////////////////////////////////////////////////////////////\n const simulationParams = {\n simulate: true,\n deltaTime: 0.04,\n };\n\n const simulationUBOBufferSize =\n 1 * 4 + // deltaTime\n 3 * 4 + // padding\n 4 * 4 + // seed\n 0;\n const simulationUBOBuffer = device.createBuffer({\n size: simulationUBOBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n Object.keys(simulationParams).forEach((k) => {\n gui.add(simulationParams, k);\n });\n\n const computePipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: particleWGSL,\n }),\n entryPoint: 'simulate',\n },\n });\n const computeBindGroup = device.createBindGroup({\n layout: computePipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: simulationUBOBuffer,\n },\n },\n {\n binding: 1,\n resource: {\n buffer: particlesBuffer,\n offset: 0,\n size: numParticles * particleInstanceByteSize,\n },\n },\n {\n binding: 2,\n resource: texture.createView(),\n },\n ],\n });\n\n const aspect = canvas.width / canvas.height;\n const projection = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 100.0);\n const view = mat4.create();\n const mvp = mat4.create();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n device.queue.writeBuffer(\n simulationUBOBuffer,\n 0,\n new Float32Array([\n simulationParams.simulate ? 
simulationParams.deltaTime : 0.0,\n 0.0,\n 0.0,\n 0.0, // padding\n Math.random() * 100,\n Math.random() * 100, // seed.xy\n 1 + Math.random(),\n 1 + Math.random(), // seed.zw\n ])\n );\n\n mat4.identity(view);\n mat4.translate(view, vec3.fromValues(0, 0, -3), view);\n mat4.rotateX(view, Math.PI * -0.2, view);\n mat4.multiply(projection, view, mvp);\n\n // prettier-ignore\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n new Float32Array([\n // modelViewProjectionMatrix\n mvp[0], mvp[1], mvp[2], mvp[3],\n mvp[4], mvp[5], mvp[6], mvp[7],\n mvp[8], mvp[9], mvp[10], mvp[11],\n mvp[12], mvp[13], mvp[14], mvp[15],\n\n view[0], view[4], view[8], // right\n\n 0, // padding\n\n view[1], view[5], view[9], // up\n\n 0, // padding\n ])\n );\n const swapChainTexture = context.getCurrentTexture();\n // prettier-ignore\n renderPassDescriptor.colorAttachments[0].view = swapChainTexture.createView();\n\n const commandEncoder = device.createCommandEncoder();\n {\n const passEncoder = commandEncoder.beginComputePass();\n passEncoder.setPipeline(computePipeline);\n passEncoder.setBindGroup(0, computeBindGroup);\n passEncoder.dispatchWorkgroups(Math.ceil(numParticles / 64));\n passEncoder.end();\n }\n {\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(renderPipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, particlesBuffer);\n passEncoder.setVertexBuffer(1, quadVertexBuffer);\n passEncoder.draw(6, numParticles, 0, 0);\n passEncoder.end();\n }\n\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst Particles: () => JSX.Element = () =>\n makeSample({\n name: 'Particles',\n description:\n 'This example demonstrates rendering of particles simulated with compute shaders.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './particle.wgsl',\n contents: particleWGSL,\n editable: true,\n },\n {\n name: './probabilityMap.wgsl',\n contents: probabilityMapWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default Particles;\n"},{name:"./particle.wgsl",contents:i,editable:!0},{name:"./probabilityMap.wgsl",contents:o,editable:!0}],filename:s});var c=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}}}]); \ No newline at end of file diff --git a/_next/static/chunks/31.48d6f46eea121502.js b/_next/static/chunks/31.48d6f46eea121502.js new file mode 100644 index 00000000..70021cb1 --- /dev/null +++ b/_next/static/chunks/31.48d6f46eea121502.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[31],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),c=t.n(l);t(7319);let u=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new 
n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),v=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,h]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(v?h(v[1]):h(a[0].name),u&&l.current)for(l.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();m&&d.current&&(m.dom.style.position="absolute",m.showPanel(1),d.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:u,stats:m});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){h(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(u,{...e});function m(e,n){if(!e)throw Error(n)}},7031:function(e,n,t){"use strict";var r="src/sample/videoUploadingWebCodecs/main.ts";t.r(n);var a=t(5671),i=t(134),o=t(7618);let s=async e=>{let{canvas:n,pageState:t,gui:r}=e,a=document.createElement("video");a.loop=!0,a.autoplay=!0,a.muted=!0,a.src="../assets/video/pano.webm",await a.play();let s=await navigator.gpu.requestAdapter(),l=await s.requestDevice();if(!t.active)return;let c=n.getContext("webgpu"),u=window.devicePixelRatio;n.width=n.clientWidth*u,n.height=n.clientHeight*u;let d=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:d,alphaMode:"premultiplied"});let m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i.Z}),entryPoint:"vert_main"},fragment:{module:l.createShaderModule({code:o.Z}),entryPoint:"main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),p=l.createSampler({magFilter:"linear",minFilter:"linear"}),v={requestFrame:"requestAnimationFrame"};function g(){if(!t.active)return;let e=new 
VideoFrame(a),n=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:1,resource:p},{binding:2,resource:l.importExternalTexture({source:e})}]}),r=l.createCommandEncoder(),i=c.getCurrentTexture().createView(),o=r.beginRenderPass({colorAttachments:[{view:i,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});o.setPipeline(m),o.setBindGroup(0,n),o.draw(6),o.end(),l.queue.submit([r.finish()]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)}r.add(v,"requestFrame",["requestAnimationFrame","requestVideoFrameCallback"]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)},l=()=>(0,a.Tl)({name:"Video Uploading with WebCodecs",description:"This example shows how to upload a WebCodecs VideoFrame to WebGPU.",gui:!0,init:s,sources:[{name:r.substring(35),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\nimport sampleExternalTextureWGSL from '../../shaders/sampleExternalTexture.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n // Set video element\n const video = document.createElement('video');\n video.loop = true;\n video.autoplay = true;\n video.muted = true;\n video.src = '../assets/video/pano.webm';\n await video.play();\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleExternalTextureWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const settings = {\n requestFrame: 'requestAnimationFrame',\n };\n\n gui.add(settings, 'requestFrame', [\n 'requestAnimationFrame',\n 'requestVideoFrameCallback',\n ]);\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const videoFrame = new VideoFrame(video);\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: device.importExternalTexture({\n source: videoFrame as any, // eslint-disable-line @typescript-eslint/no-explicit-any\n }),\n },\n ],\n });\n\n const commandEncoder = device.createCommandEncoder();\n const textureView = context.getCurrentTexture().createView();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: textureView,\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n 
passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n }\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n};\n\nconst VideoUploadingWebCodecs: () => JSX.Element = () =>\n makeSample({\n name: 'Video Uploading with WebCodecs',\n description: `This example shows how to upload a WebCodecs VideoFrame to WebGPU.`,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n {\n name: '../../shaders/sampleExternalTexture.wgsl',\n contents: sampleExternalTextureWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default VideoUploadingWebCodecs;\n"},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:i.Z,editable:!0},{name:"../../shaders/sampleExternalTexture.wgsl",contents:o.Z,editable:!0}],filename:r});n.default=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 1.0),\n vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2) -> @location(0) vec4 {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"},7618:function(e,n){"use strict";n.Z="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_external;\n\n@fragment\nfn main(@location(0) fragUV : vec2) -> @location(0) vec4 {\n return textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/31.9edde3db065401fc.js b/_next/static/chunks/31.9edde3db065401fc.js deleted file mode 100644 index b2f39d1e..00000000 --- a/_next/static/chunks/31.9edde3db065401fc.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[31],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),c=t.n(l);t(7319);let u=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let 
i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),v=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,h]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(v?h(v[1]):h(a[0].name),u&&l.current)for(l.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();m&&d.current&&(m.dom.style.position="absolute",m.showPanel(1),d.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:u,stats:m});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){h(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(u,{...e});function m(e,n){if(!e)throw Error(n)}},7031:function(e,n,t){"use strict";var r="src/sample/videoUploadingWebCodecs/main.ts";t.r(n);var a=t(5671),i=t(134),o=t(7618);let s=async e=>{let{canvas:n,pageState:t,gui:r}=e,a=document.createElement("video");a.loop=!0,a.autoplay=!0,a.muted=!0,a.src="/assets/video/pano.webm",await a.play();let s=await navigator.gpu.requestAdapter(),l=await s.requestDevice();if(!t.active)return;let c=n.getContext("webgpu"),u=window.devicePixelRatio;n.width=n.clientWidth*u,n.height=n.clientHeight*u;let d=navigator.gpu.getPreferredCanvasFormat();c.configure({device:l,format:d,alphaMode:"premultiplied"});let m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i.Z}),entryPoint:"vert_main"},fragment:{module:l.createShaderModule({code:o.Z}),entryPoint:"main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),p=l.createSampler({magFilter:"linear",minFilter:"linear"}),v={requestFrame:"requestAnimationFrame"};function g(){if(!t.active)return;let e=new VideoFrame(a),n=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:1,resource:p},{binding:2,resource:l.importExternalTexture({source:e})}]}),r=l.createCommandEncoder(),i=c.getCurrentTexture().createView(),o=r.beginRenderPass({colorAttachments:[{view:i,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});o.setPipeline(m),o.setBindGroup(0,n),o.draw(6),o.end(),l.queue.submit([r.finish()]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)}r.add(v,"requestFrame",["requestAnimationFrame","requestVideoFrameCallback"]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)},l=()=>(0,a.Tl)({name:"Video Uploading with WebCodecs",description:"This example shows how to upload a WebCodecs VideoFrame to WebGPU.",gui:!0,init:s,sources:[{name:r.substring(35),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\nimport sampleExternalTextureWGSL from '../../shaders/sampleExternalTexture.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n // Set video element\n const video = document.createElement('video');\n video.loop = true;\n video.autoplay = true;\n video.muted = true;\n video.src = '/assets/video/pano.webm';\n await video.play();\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = 
navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleExternalTextureWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const settings = {\n requestFrame: 'requestAnimationFrame',\n };\n\n gui.add(settings, 'requestFrame', [\n 'requestAnimationFrame',\n 'requestVideoFrameCallback',\n ]);\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const videoFrame = new VideoFrame(video);\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: device.importExternalTexture({\n source: videoFrame as any, // eslint-disable-line @typescript-eslint/no-explicit-any\n }),\n },\n ],\n });\n\n const commandEncoder = device.createCommandEncoder();\n const textureView = context.getCurrentTexture().createView();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: textureView,\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n }\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n};\n\nconst VideoUploadingWebCodecs: () => JSX.Element = () =>\n makeSample({\n name: 'Video Uploading with WebCodecs',\n description: `This example shows how to upload a WebCodecs VideoFrame to WebGPU.`,\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n {\n name: '../../shaders/sampleExternalTexture.wgsl',\n contents: sampleExternalTextureWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default VideoUploadingWebCodecs;\n"},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:i.Z,editable:!0},{name:"../../shaders/sampleExternalTexture.wgsl",contents:o.Z,editable:!0}],filename:r});n.default=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = 
array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 1.0),\n vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2) -> @location(0) vec4 {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"},7618:function(e,n){"use strict";n.Z="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_external;\n\n@fragment\nfn main(@location(0) fragUV : vec2) -> @location(0) vec4 {\n return textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/428.0485846e4c9c50a8.js b/_next/static/chunks/428.0485846e4c9c50a8.js deleted file mode 100644 index 11632b7d..00000000 --- a/_next/static/chunks/428.0485846e4c9c50a8.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[428],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),s=t(1163),o=t(7294),u=t(9147),c=t.n(u);t(7319);let d=e=>{let n=(0,o.useRef)(null),a=(0,o.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,o.useRef)(null),d=(0,o.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),l=(0,o.useRef)(null),m=(0,o.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),h=(0,s.useRouter)(),p=h.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,g]=(0,o.useState)(null),[v,x]=(0,o.useState)(null);return(0,o.useEffect)(()=>{if(p?x(p[1]):x(a[0].name),d&&u.current)for(u.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();m&&l.current&&(m.dom.style.position="absolute",m.showPanel(1),l.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let s=e.init({canvas:i,pageState:t,gui:d,stats:m});s instanceof Promise&&s.catch(e=>{console.error(e),g(e)})}catch(o){console.error(o),g(o)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),f?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(f)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":v==e.name},n))]})]})},l=e=>(0,r.jsx)(d,{...e});function m(e,n){if(!e)throw Error(n)}},3428:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return d}});var r=t(6416),a=t(5671);let i={vertexStride:32,positionsOffset:0,normalOffset:12,uvOffset:24};var s="struct Uniforms {\n viewProjectionMatrix : mat4x4f\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n\n@group(1) @binding(0) var modelMatrix : mat4x4f;\n\nstruct VertexInput {\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f\n}\n\nstruct VertexOutput {\n @builtin(position) position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n output.position = uniforms.viewProjectionMatrix * modelMatrix * input.position;\n output.normal = normalize((modelMatrix * vec4(input.normal, 0)).xyz);\n output.uv = input.uv;\n return output;\n}\n\n@group(1) @binding(1) var meshSampler: sampler;\n@group(1) @binding(2) var meshTexture: texture_2d;\n\n// Static directional lighting\nconst lightDir = vec3f(1, 1, 1);\nconst dirColor = vec3(1);\nconst ambientColor = vec3f(0.05);\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n let textureColor = textureSample(meshTexture, meshSampler, input.uv);\n\n // Very simplified lighting algorithm.\n let lightColor = saturate(ambientColor + max(dot(input.normal, lightDir), 0.0) * dirColor);\n\n return vec4f(textureColor.rgb * lightColor, textureColor.a);\n}",o="src/sample/renderBundles/main.ts";let u=async e=>{let n,t,a,{canvas:o,pageState:u,gui:c,stats:d}=e,l=await navigator.gpu.requestAdapter(),m=await l.requestDevice();if(!u.active)return;let h={useRenderBundles:!0,asteroidCount:5e3};c.add(h,"useRenderBundles"),c.add(h,"asteroidCount",1e3,1e4,1e3).onChange(()=>{_(),L()});let p=o.getContext("webgpu"),f=window.devicePixelRatio;o.width=o.clientWidth*f,o.height=o.clientHeight*f;let g=navigator.gpu.getPreferredCanvasFormat();p.configure({device:m,format:g,alphaMode:"premultiplied"});let v=m.createShaderModule({code:s}),x=m.createRenderPipeline({layout:"auto",vertex:{module:v,entryPoint:"vertexMain",buffers:[{arrayStride:i.vertexStride,attributes:[{shaderLocation:0,offset:i.positionsOffset,format:"float32x3"},{shaderLocation:1,offset:i.normalOffset,format:"float32x3"},{shaderLocation:2,offset:i.uvOffset,format:"float32x2"}]}]},fragment:{module:v,entryPoint:"fragmentMain",targets:[{format:g}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),b=m.createTexture({size:[o.width,o.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),w=m.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let y=await fetch("/assets/img/saturn.jpg"),M=await createImageBitmap(await 
y.blob());n=m.createTexture({size:[M.width,M.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),m.queue.copyExternalImageToTexture({source:M},{texture:n},[M.width,M.height])}{let S=await fetch("/assets/img/moon.jpg"),B=await createImageBitmap(await S.blob());t=m.createTexture({size:[B.width,B.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),m.queue.copyExternalImageToTexture({source:B},{texture:t},[B.width,B.height])}let P=m.createSampler({magFilter:"linear",minFilter:"linear"});function T(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:32,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:16,a=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,i=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:32,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:16,a=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,i=[],s=[];n=Math.max(3,Math.floor(n)),t=Math.max(2,Math.floor(t));let o=r.R3.create(),u=r.R3.create(),c=r.R3.create(),d=0,l=[];for(let m=0;m<=t;m++){let h=[],p=m/t,f=0;0===m?f=.5/n:m===t&&(f=-.5/n);for(let g=0;g<=n;g++){let v=g/n;if(g==n)r.R3.copy(o,u);else if(0==g||0!=m&&m!==t){let x=e+(Math.random()-.5)*2*a*e;u[0]=-x*Math.cos(v*Math.PI*2)*Math.sin(p*Math.PI),u[1]=x*Math.cos(p*Math.PI),u[2]=x*Math.sin(v*Math.PI*2)*Math.sin(p*Math.PI),0==g&&r.R3.copy(u,o)}i.push(...u),r.R3.copy(u,c),r.R3.normalize(c,c),i.push(...c),i.push(v+f,1-p),h.push(d++)}l.push(h)}for(let b=0;bh.asteroidCount)break}function L(){let e=m.createRenderBundleEncoder({colorFormats:[g],depthStencilFormat:"depth24plus"});N(e),a=e.finish()}L(),requestAnimationFrame(function e(){if(!u.active)return;d.begin();let n=function(){let e=r._E.identity();r._E.translate(e,r.R3.fromValues(0,0,-4),e);let n=Date.now()/1e3;return r._E.rotateZ(e,.1*Math.PI,e),r._E.rotateX(e,.1*Math.PI,e),r._E.rotateY(e,.05*n,e),r._E.multiply(O,e,j),j}();m.queue.writeBuffer(w,0,n.buffer,n.byteOffset,n.byteLength),I.colorAttachments[0].view=p.getCurrentTexture().createView();let t=m.createCommandEncoder(),i=t.beginRenderPass(I);h.useRenderBundles?i.executeBundles([a]):N(i),i.end(),m.queue.submit([t.finish()]),d.end(),requestAnimationFrame(e)})},c=()=>(0,a.Tl)({name:"Render Bundles",description:"This example shows how to use render bundles. It renders a large number of\n meshes individually as a proxy for a more complex scene in order to demonstrate the reduction\n in JavaScript time spent to issue render commands. 
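// (Hedged aside, not part of the minified bundle above.) Stripped of the
// scene setup, the render-bundle API this sample exercises is three calls.
// A minimal sketch, assuming `device`, a `presentationFormat`, and some
// `recordScene(encoder)` routine are already defined:
//
//   const bundleEncoder = device.createRenderBundleEncoder({
//     colorFormats: [presentationFormat], // must match the real pass targets
//     depthStencilFormat: 'depth24plus',  // and its depth attachment
//   });
//   recordScene(bundleEncoder);           // record the draws once...
//   const bundle = bundleEncoder.finish();
//
//   // ...then replay them inside any compatible render pass, every frame:
//   passEncoder.executeBundles([bundle]);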
(Typically a scene like this would make use\n of instancing to reduce draw overhead.)",gui:!0,stats:!0,init:u,sources:[{name:o.substring(25),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { createSphereMesh, SphereLayout } from '../../meshes/sphere';\n\nimport meshWGSL from './mesh.wgsl';\n\ninterface Renderable {\n vertices: GPUBuffer;\n indices: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui, stats }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const settings = {\n useRenderBundles: true,\n asteroidCount: 5000,\n };\n gui.add(settings, 'useRenderBundles');\n gui.add(settings, 'asteroidCount', 1000, 10000, 1000).onChange(() => {\n // If the content of the scene changes the render bundle must be recreated.\n ensureEnoughAsteroids();\n updateRenderBundle();\n });\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const shaderModule = device.createShaderModule({\n code: meshWGSL,\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: shaderModule,\n entryPoint: 'vertexMain',\n buffers: [\n {\n arrayStride: SphereLayout.vertexStride,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: SphereLayout.positionsOffset,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: SphereLayout.normalOffset,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: SphereLayout.uvOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: shaderModule,\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the sphere is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n // pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the images and upload them into a GPUTexture.\n let planetTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/saturn.jpg');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n planetTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: planetTexture },\n 
[imageBitmap.width, imageBitmap.height]\n );\n }\n\n let moonTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/moon.jpg');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n moonTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: moonTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n // Helper functions to create the required meshes and bind groups for each sphere.\n function createSphereRenderable(\n radius: number,\n widthSegments = 32,\n heightSegments = 16,\n randomness = 0\n ): Renderable {\n const sphereMesh = createSphereMesh(\n radius,\n widthSegments,\n heightSegments,\n randomness\n );\n\n // Create a vertex buffer from the sphere data.\n const vertices = device.createBuffer({\n size: sphereMesh.vertices.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(vertices.getMappedRange()).set(sphereMesh.vertices);\n vertices.unmap();\n\n const indices = device.createBuffer({\n size: sphereMesh.indices.byteLength,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n new Uint16Array(indices.getMappedRange()).set(sphereMesh.indices);\n indices.unmap();\n\n return {\n vertices,\n indices,\n indexCount: sphereMesh.indices.length,\n };\n }\n\n function createSphereBindGroup(\n texture: GPUTexture,\n transform: Float32Array\n ): GPUBindGroup {\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n mappedAtCreation: true,\n });\n new Float32Array(uniformBuffer.getMappedRange()).set(transform);\n uniformBuffer.unmap();\n\n const bindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: texture.createView(),\n },\n ],\n });\n\n return bindGroup;\n }\n\n const transform = mat4.create() as Float32Array;\n mat4.identity(transform);\n\n // Create one large central planet surrounded by a large ring of asteroids\n const planet = createSphereRenderable(1.0);\n planet.bindGroup = createSphereBindGroup(planetTexture, transform);\n\n const asteroids = [\n createSphereRenderable(0.01, 8, 6, 0.15),\n createSphereRenderable(0.013, 8, 6, 0.15),\n createSphereRenderable(0.017, 8, 6, 0.15),\n createSphereRenderable(0.02, 8, 6, 0.15),\n createSphereRenderable(0.03, 16, 8, 0.15),\n ];\n\n const renderables = [planet];\n\n function ensureEnoughAsteroids() {\n for (let i = renderables.length; i <= settings.asteroidCount; ++i) {\n // Place copies of the asteroid in a ring.\n const radius = Math.random() * 1.7 + 1.25;\n const angle = Math.random() * Math.PI * 2;\n const x = Math.sin(angle) * radius;\n const y = (Math.random() - 0.5) * 0.015;\n const z = Math.cos(angle) * radius;\n\n mat4.identity(transform);\n mat4.translate(transform, [x, y, z], transform);\n mat4.rotateX(transform, Math.random() * Math.PI, transform);\n mat4.rotateY(transform, Math.random() * Math.PI, transform);\n renderables.push({\n ...asteroids[i % asteroids.length],\n bindGroup: 
createSphereBindGroup(moonTexture, transform),\n });\n }\n }\n ensureEnoughAsteroids();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n const frameBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n // Tilt the view matrix so the planet looks like it's off-axis.\n mat4.rotateZ(viewMatrix, Math.PI * 0.1, viewMatrix);\n mat4.rotateX(viewMatrix, Math.PI * 0.1, viewMatrix);\n // Rotate the view matrix slowly so the planet appears to spin.\n mat4.rotateY(viewMatrix, now * 0.05, viewMatrix);\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n // Render bundles function as partial, limited render passes, so we can use the\n // same code both to render the scene normally and to build the render bundle.\n function renderScene(\n passEncoder: GPURenderPassEncoder | GPURenderBundleEncoder\n ) {\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, frameBindGroup);\n\n // Loop through every renderable object and draw them individually.\n // (Because many of these meshes are repeated, with only the transforms\n // differing, instancing would be highly effective here. This sample\n // intentionally avoids using instancing in order to emulate a more complex\n // scene, which helps demonstrate the potential time savings a render bundle\n // can provide.)\n let count = 0;\n for (const renderable of renderables) {\n passEncoder.setBindGroup(1, renderable.bindGroup);\n passEncoder.setVertexBuffer(0, renderable.vertices);\n passEncoder.setIndexBuffer(renderable.indices, 'uint16');\n passEncoder.drawIndexed(renderable.indexCount);\n\n if (++count > settings.asteroidCount) {\n break;\n }\n }\n }\n\n // The render bundle can be encoded once and re-used as many times as needed.\n // Because it encodes all of the commands needed to render at the GPU level,\n // those commands will not need to execute the associated JavaScript code upon\n // execution or be re-validated, which can represent a significant time savings.\n //\n // However, because render bundles are immutable once created, they are only\n // appropriate for rendering content where the same commands will be executed\n // every time, with the only changes being the contents of the buffers and\n // textures used. 
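// (Editorial illustration: a bundle records *references* to buffers and
// textures, never their contents, so a bundled scene can still animate by
// rewriting those resources each frame. That is exactly what frame() below
// does before replaying the bundle:
//
//   device.queue.writeBuffer(uniformBuffer, 0, transformationMatrix);
//   passEncoder.executeBundles([renderBundle]); // samples the new contents
//
// Only the command stream is frozen at finish() time.)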
Cases where the executed commands differ from frame-to-frame,\n // such as when using frustrum or occlusion culling, will not benefit from\n // using render bundles as much.\n let renderBundle;\n function updateRenderBundle() {\n const renderBundleEncoder = device.createRenderBundleEncoder({\n colorFormats: [presentationFormat],\n depthStencilFormat: 'depth24plus',\n });\n renderScene(renderBundleEncoder);\n renderBundle = renderBundleEncoder.finish();\n }\n updateRenderBundle();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n stats.begin();\n\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n\n if (settings.useRenderBundles) {\n // Executing a bundle is equivalent to calling all of the commands encoded\n // in the render bundle as part of the current render pass.\n passEncoder.executeBundles([renderBundle]);\n } else {\n // Alternatively, the same render commands can be encoded manually, which\n // can take longer since each command needs to be interpreted by the\n // JavaScript virtual machine and re-validated each time.\n renderScene(passEncoder);\n }\n\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n stats.end();\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst RenderBundles: () => JSX.Element = () =>\n makeSample({\n name: 'Render Bundles',\n description: `This example shows how to use render bundles. It renders a large number of\n meshes individually as a proxy for a more complex scene in order to demonstrate the reduction\n in JavaScript time spent to issue render commands. 
(Typically a scene like this would make use\n of instancing to reduce draw overhead.)`,\n gui: true,\n stats: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './mesh.wgsl',\n contents: meshWGSL,\n editable: true,\n },\n {\n name: '../../meshes/sphere.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/sphere.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default RenderBundles;\n"},{name:"./mesh.wgsl",contents:s,editable:!0},{name:"../../meshes/sphere.ts",contents:t(8557).Z}],filename:o});var d=c},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},8557:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\n\nexport interface SphereMesh {\n vertices: Float32Array;\n indices: Uint16Array;\n}\n\nexport const SphereLayout = {\n vertexStride: 8 * 4,\n positionsOffset: 0,\n normalOffset: 3 * 4,\n uvOffset: 6 * 4,\n};\n\n// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/SphereGeometry.js\nexport function createSphereMesh(\n radius: number,\n widthSegments = 32,\n heightSegments = 16,\n randomness = 0\n): SphereMesh {\n const vertices = [];\n const indices = [];\n\n widthSegments = Math.max(3, Math.floor(widthSegments));\n heightSegments = Math.max(2, Math.floor(heightSegments));\n\n const firstVertex = vec3.create();\n const vertex = vec3.create();\n const normal = vec3.create();\n\n let index = 0;\n const grid = [];\n\n // generate vertices, normals and uvs\n for (let iy = 0; iy <= heightSegments; iy++) {\n const verticesRow = [];\n const v = iy / heightSegments;\n\n // special case for the poles\n let uOffset = 0;\n if (iy === 0) {\n uOffset = 0.5 / widthSegments;\n } else if (iy === heightSegments) {\n uOffset = -0.5 / widthSegments;\n }\n\n for (let ix = 0; ix <= widthSegments; ix++) {\n const u = ix / widthSegments;\n\n // Poles should just use the same position all the way around.\n if (ix == widthSegments) {\n vec3.copy(firstVertex, vertex);\n } else if (ix == 0 || (iy != 0 && iy !== heightSegments)) {\n const rr = radius + (Math.random() - 0.5) * 2 * randomness * radius;\n\n // vertex\n vertex[0] = -rr * Math.cos(u * Math.PI * 2) * Math.sin(v * Math.PI);\n vertex[1] = rr * Math.cos(v * Math.PI);\n vertex[2] = rr * Math.sin(u * Math.PI * 2) * Math.sin(v * Math.PI);\n\n if (ix == 0) {\n vec3.copy(vertex, firstVertex);\n }\n }\n\n vertices.push(...vertex);\n\n // normal\n vec3.copy(vertex, normal);\n vec3.normalize(normal, normal);\n vertices.push(...normal);\n\n // uv\n vertices.push(u + uOffset, 1 - v);\n verticesRow.push(index++);\n }\n\n grid.push(verticesRow);\n }\n\n // indices\n for (let iy = 0; iy < heightSegments; iy++) {\n for (let ix = 0; ix < widthSegments; ix++) {\n const a = grid[iy][ix + 1];\n const b = grid[iy][ix];\n const c = grid[iy + 1][ix];\n const d = grid[iy + 1][ix + 1];\n\n if (iy !== 0) indices.push(a, b, d);\n if (iy !== heightSegments - 1) indices.push(b, c, d);\n }\n }\n\n return {\n vertices: new Float32Array(vertices),\n indices: new Uint16Array(indices),\n };\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/428.5f30ed82e9153690.js b/_next/static/chunks/428.5f30ed82e9153690.js new file mode 100644 index 00000000..390e9aed --- /dev/null +++ b/_next/static/chunks/428.5f30ed82e9153690.js 
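The sphere.ts source embedded in the chunk above builds a UV sphere row by row, so its buffer sizes follow directly from the segment counts. A hedged sanity check (illustrative only, not part of this patch), assuming the createSphereMesh and SphereLayout exports shown in that source:

import { createSphereMesh, SphereLayout } from '../../meshes/sphere';

// Defaults widthSegments=32, heightSegments=16: the vertex grid is
// (32 + 1) * (16 + 1) = 561 vertices of 8 floats each, and the index list
// holds 6 * 32 * (16 - 1) = 2880 entries, because each pole row emits one
// triangle per quad while interior rows emit two.
const mesh = createSphereMesh(1.0);
console.assert(mesh.vertices.length === 561 * (SphereLayout.vertexStride / 4));
console.assert(mesh.indices.length === 2880);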
@@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[428],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),s=t(1163),o=t(7294),u=t(9147),c=t.n(u);t(7319);let d=e=>{let n=(0,o.useRef)(null),a=(0,o.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),u=(0,o.useRef)(null),d=(0,o.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),l=(0,o.useRef)(null),m=(0,o.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),h=(0,s.useRouter)(),p=h.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,g]=(0,o.useState)(null),[v,x]=(0,o.useState)(null);return(0,o.useEffect)(()=>{if(p?x(p[1]):x(a[0].name),d&&u.current)for(u.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();m&&l.current&&(m.dom.style.position="absolute",m.showPanel(1),l.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let s=e.init({canvas:i,pageState:t,gui:d,stats:m});s instanceof Promise&&s.catch(e=>{console.error(e),g(e)})}catch(o){console.error(o),g(o)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),f?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(f)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:u}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":v==e.name},n))]})]})},l=e=>(0,r.jsx)(d,{...e});function m(e,n){if(!e)throw Error(n)}},3428:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return d}});var r=t(6416),a=t(5671);let i={vertexStride:32,positionsOffset:0,normalOffset:12,uvOffset:24};var s="struct Uniforms {\n viewProjectionMatrix : mat4x4f\n}\n@group(0) @binding(0) var uniforms : Uniforms;\n\n@group(1) @binding(0) var modelMatrix : mat4x4f;\n\nstruct VertexInput {\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f\n}\n\nstruct VertexOutput {\n @builtin(position) position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n output.position = uniforms.viewProjectionMatrix * modelMatrix * input.position;\n output.normal = normalize((modelMatrix * vec4(input.normal, 0)).xyz);\n output.uv = input.uv;\n return output;\n}\n\n@group(1) @binding(1) var meshSampler: sampler;\n@group(1) @binding(2) var meshTexture: texture_2d;\n\n// Static directional lighting\nconst lightDir = vec3f(1, 1, 1);\nconst dirColor = vec3(1);\nconst ambientColor = vec3f(0.05);\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n let textureColor = textureSample(meshTexture, meshSampler, input.uv);\n\n // Very simplified lighting algorithm.\n let lightColor = saturate(ambientColor + max(dot(input.normal, lightDir), 0.0) * dirColor);\n\n return vec4f(textureColor.rgb * lightColor, textureColor.a);\n}",o="src/sample/renderBundles/main.ts";let u=async e=>{let n,t,a,{canvas:o,pageState:u,gui:c,stats:d}=e,l=await navigator.gpu.requestAdapter(),m=await l.requestDevice();if(!u.active)return;let h={useRenderBundles:!0,asteroidCount:5e3};c.add(h,"useRenderBundles"),c.add(h,"asteroidCount",1e3,1e4,1e3).onChange(()=>{_(),L()});let p=o.getContext("webgpu"),f=window.devicePixelRatio;o.width=o.clientWidth*f,o.height=o.clientHeight*f;let g=navigator.gpu.getPreferredCanvasFormat();p.configure({device:m,format:g,alphaMode:"premultiplied"});let v=m.createShaderModule({code:s}),x=m.createRenderPipeline({layout:"auto",vertex:{module:v,entryPoint:"vertexMain",buffers:[{arrayStride:i.vertexStride,attributes:[{shaderLocation:0,offset:i.positionsOffset,format:"float32x3"},{shaderLocation:1,offset:i.normalOffset,format:"float32x3"},{shaderLocation:2,offset:i.uvOffset,format:"float32x2"}]}]},fragment:{module:v,entryPoint:"fragmentMain",targets:[{format:g}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),b=m.createTexture({size:[o.width,o.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),w=m.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let y=await fetch("../assets/img/saturn.jpg"),M=await createImageBitmap(await 
y.blob());n=m.createTexture({size:[M.width,M.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),m.queue.copyExternalImageToTexture({source:M},{texture:n},[M.width,M.height])}{let S=await fetch("../assets/img/moon.jpg"),B=await createImageBitmap(await S.blob());t=m.createTexture({size:[B.width,B.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),m.queue.copyExternalImageToTexture({source:B},{texture:t},[B.width,B.height])}let P=m.createSampler({magFilter:"linear",minFilter:"linear"});function T(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:32,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:16,a=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,i=function(e){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:32,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:16,a=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,i=[],s=[];n=Math.max(3,Math.floor(n)),t=Math.max(2,Math.floor(t));let o=r.R3.create(),u=r.R3.create(),c=r.R3.create(),d=0,l=[];for(let m=0;m<=t;m++){let h=[],p=m/t,f=0;0===m?f=.5/n:m===t&&(f=-.5/n);for(let g=0;g<=n;g++){let v=g/n;if(g==n)r.R3.copy(o,u);else if(0==g||0!=m&&m!==t){let x=e+(Math.random()-.5)*2*a*e;u[0]=-x*Math.cos(v*Math.PI*2)*Math.sin(p*Math.PI),u[1]=x*Math.cos(p*Math.PI),u[2]=x*Math.sin(v*Math.PI*2)*Math.sin(p*Math.PI),0==g&&r.R3.copy(u,o)}i.push(...u),r.R3.copy(u,c),r.R3.normalize(c,c),i.push(...c),i.push(v+f,1-p),h.push(d++)}l.push(h)}for(let b=0;bh.asteroidCount)break}function L(){let e=m.createRenderBundleEncoder({colorFormats:[g],depthStencilFormat:"depth24plus"});N(e),a=e.finish()}L(),requestAnimationFrame(function e(){if(!u.active)return;d.begin();let n=function(){let e=r._E.identity();r._E.translate(e,r.R3.fromValues(0,0,-4),e);let n=Date.now()/1e3;return r._E.rotateZ(e,.1*Math.PI,e),r._E.rotateX(e,.1*Math.PI,e),r._E.rotateY(e,.05*n,e),r._E.multiply(O,e,j),j}();m.queue.writeBuffer(w,0,n.buffer,n.byteOffset,n.byteLength),I.colorAttachments[0].view=p.getCurrentTexture().createView();let t=m.createCommandEncoder(),i=t.beginRenderPass(I);h.useRenderBundles?i.executeBundles([a]):N(i),i.end(),m.queue.submit([t.finish()]),d.end(),requestAnimationFrame(e)})},c=()=>(0,a.Tl)({name:"Render Bundles",description:"This example shows how to use render bundles. It renders a large number of\n meshes individually as a proxy for a more complex scene in order to demonstrate the reduction\n in JavaScript time spent to issue render commands. 
(Typically a scene like this would make use\n of instancing to reduce draw overhead.)",gui:!0,stats:!0,init:u,sources:[{name:o.substring(25),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport { createSphereMesh, SphereLayout } from '../../meshes/sphere';\n\nimport meshWGSL from './mesh.wgsl';\n\ninterface Renderable {\n vertices: GPUBuffer;\n indices: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui, stats }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const settings = {\n useRenderBundles: true,\n asteroidCount: 5000,\n };\n gui.add(settings, 'useRenderBundles');\n gui.add(settings, 'asteroidCount', 1000, 10000, 1000).onChange(() => {\n // If the content of the scene changes the render bundle must be recreated.\n ensureEnoughAsteroids();\n updateRenderBundle();\n });\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const shaderModule = device.createShaderModule({\n code: meshWGSL,\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: shaderModule,\n entryPoint: 'vertexMain',\n buffers: [\n {\n arrayStride: SphereLayout.vertexStride,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: SphereLayout.positionsOffset,\n format: 'float32x3',\n },\n {\n // normal\n shaderLocation: 1,\n offset: SphereLayout.normalOffset,\n format: 'float32x3',\n },\n {\n // uv\n shaderLocation: 2,\n offset: SphereLayout.uvOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: shaderModule,\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the sphere is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n // pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the images and upload them into a GPUTexture.\n let planetTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/saturn.jpg');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n planetTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: planetTexture },\n 
[imageBitmap.width, imageBitmap.height]\n );\n }\n\n let moonTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/moon.jpg');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n moonTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: moonTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n // Helper functions to create the required meshes and bind groups for each sphere.\n function createSphereRenderable(\n radius: number,\n widthSegments = 32,\n heightSegments = 16,\n randomness = 0\n ): Renderable {\n const sphereMesh = createSphereMesh(\n radius,\n widthSegments,\n heightSegments,\n randomness\n );\n\n // Create a vertex buffer from the sphere data.\n const vertices = device.createBuffer({\n size: sphereMesh.vertices.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(vertices.getMappedRange()).set(sphereMesh.vertices);\n vertices.unmap();\n\n const indices = device.createBuffer({\n size: sphereMesh.indices.byteLength,\n usage: GPUBufferUsage.INDEX,\n mappedAtCreation: true,\n });\n new Uint16Array(indices.getMappedRange()).set(sphereMesh.indices);\n indices.unmap();\n\n return {\n vertices,\n indices,\n indexCount: sphereMesh.indices.length,\n };\n }\n\n function createSphereBindGroup(\n texture: GPUTexture,\n transform: Float32Array\n ): GPUBindGroup {\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n mappedAtCreation: true,\n });\n new Float32Array(uniformBuffer.getMappedRange()).set(transform);\n uniformBuffer.unmap();\n\n const bindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: texture.createView(),\n },\n ],\n });\n\n return bindGroup;\n }\n\n const transform = mat4.create() as Float32Array;\n mat4.identity(transform);\n\n // Create one large central planet surrounded by a large ring of asteroids\n const planet = createSphereRenderable(1.0);\n planet.bindGroup = createSphereBindGroup(planetTexture, transform);\n\n const asteroids = [\n createSphereRenderable(0.01, 8, 6, 0.15),\n createSphereRenderable(0.013, 8, 6, 0.15),\n createSphereRenderable(0.017, 8, 6, 0.15),\n createSphereRenderable(0.02, 8, 6, 0.15),\n createSphereRenderable(0.03, 16, 8, 0.15),\n ];\n\n const renderables = [planet];\n\n function ensureEnoughAsteroids() {\n for (let i = renderables.length; i <= settings.asteroidCount; ++i) {\n // Place copies of the asteroid in a ring.\n const radius = Math.random() * 1.7 + 1.25;\n const angle = Math.random() * Math.PI * 2;\n const x = Math.sin(angle) * radius;\n const y = (Math.random() - 0.5) * 0.015;\n const z = Math.cos(angle) * radius;\n\n mat4.identity(transform);\n mat4.translate(transform, [x, y, z], transform);\n mat4.rotateX(transform, Math.random() * Math.PI, transform);\n mat4.rotateY(transform, Math.random() * Math.PI, transform);\n renderables.push({\n ...asteroids[i % asteroids.length],\n bindGroup: 
createSphereBindGroup(moonTexture, transform),\n });\n }\n }\n ensureEnoughAsteroids();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n const frameBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n // Tilt the view matrix so the planet looks like it's off-axis.\n mat4.rotateZ(viewMatrix, Math.PI * 0.1, viewMatrix);\n mat4.rotateX(viewMatrix, Math.PI * 0.1, viewMatrix);\n // Rotate the view matrix slowly so the planet appears to spin.\n mat4.rotateY(viewMatrix, now * 0.05, viewMatrix);\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n // Render bundles function as partial, limited render passes, so we can use the\n // same code both to render the scene normally and to build the render bundle.\n function renderScene(\n passEncoder: GPURenderPassEncoder | GPURenderBundleEncoder\n ) {\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, frameBindGroup);\n\n // Loop through every renderable object and draw them individually.\n // (Because many of these meshes are repeated, with only the transforms\n // differing, instancing would be highly effective here. This sample\n // intentionally avoids using instancing in order to emulate a more complex\n // scene, which helps demonstrate the potential time savings a render bundle\n // can provide.)\n let count = 0;\n for (const renderable of renderables) {\n passEncoder.setBindGroup(1, renderable.bindGroup);\n passEncoder.setVertexBuffer(0, renderable.vertices);\n passEncoder.setIndexBuffer(renderable.indices, 'uint16');\n passEncoder.drawIndexed(renderable.indexCount);\n\n if (++count > settings.asteroidCount) {\n break;\n }\n }\n }\n\n // The render bundle can be encoded once and re-used as many times as needed.\n // Because it encodes all of the commands needed to render at the GPU level,\n // those commands will not need to execute the associated JavaScript code upon\n // execution or be re-validated, which can represent a significant time savings.\n //\n // However, because render bundles are immutable once created, they are only\n // appropriate for rendering content where the same commands will be executed\n // every time, with the only changes being the contents of the buffers and\n // textures used. 
Cases where the executed commands differ from frame-to-frame,\n // such as when using frustrum or occlusion culling, will not benefit from\n // using render bundles as much.\n let renderBundle;\n function updateRenderBundle() {\n const renderBundleEncoder = device.createRenderBundleEncoder({\n colorFormats: [presentationFormat],\n depthStencilFormat: 'depth24plus',\n });\n renderScene(renderBundleEncoder);\n renderBundle = renderBundleEncoder.finish();\n }\n updateRenderBundle();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n stats.begin();\n\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n\n if (settings.useRenderBundles) {\n // Executing a bundle is equivalent to calling all of the commands encoded\n // in the render bundle as part of the current render pass.\n passEncoder.executeBundles([renderBundle]);\n } else {\n // Alternatively, the same render commands can be encoded manually, which\n // can take longer since each command needs to be interpreted by the\n // JavaScript virtual machine and re-validated each time.\n renderScene(passEncoder);\n }\n\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n stats.end();\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst RenderBundles: () => JSX.Element = () =>\n makeSample({\n name: 'Render Bundles',\n description: `This example shows how to use render bundles. It renders a large number of\n meshes individually as a proxy for a more complex scene in order to demonstrate the reduction\n in JavaScript time spent to issue render commands. 
(Typically a scene like this would make use\n of instancing to reduce draw overhead.)`,\n gui: true,\n stats: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './mesh.wgsl',\n contents: meshWGSL,\n editable: true,\n },\n {\n name: '../../meshes/sphere.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/sphere.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default RenderBundles;\n"},{name:"./mesh.wgsl",contents:s,editable:!0},{name:"../../meshes/sphere.ts",contents:t(8557).Z}],filename:o});var d=c},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},8557:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\n\nexport interface SphereMesh {\n vertices: Float32Array;\n indices: Uint16Array;\n}\n\nexport const SphereLayout = {\n vertexStride: 8 * 4,\n positionsOffset: 0,\n normalOffset: 3 * 4,\n uvOffset: 6 * 4,\n};\n\n// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/SphereGeometry.js\nexport function createSphereMesh(\n radius: number,\n widthSegments = 32,\n heightSegments = 16,\n randomness = 0\n): SphereMesh {\n const vertices = [];\n const indices = [];\n\n widthSegments = Math.max(3, Math.floor(widthSegments));\n heightSegments = Math.max(2, Math.floor(heightSegments));\n\n const firstVertex = vec3.create();\n const vertex = vec3.create();\n const normal = vec3.create();\n\n let index = 0;\n const grid = [];\n\n // generate vertices, normals and uvs\n for (let iy = 0; iy <= heightSegments; iy++) {\n const verticesRow = [];\n const v = iy / heightSegments;\n\n // special case for the poles\n let uOffset = 0;\n if (iy === 0) {\n uOffset = 0.5 / widthSegments;\n } else if (iy === heightSegments) {\n uOffset = -0.5 / widthSegments;\n }\n\n for (let ix = 0; ix <= widthSegments; ix++) {\n const u = ix / widthSegments;\n\n // Poles should just use the same position all the way around.\n if (ix == widthSegments) {\n vec3.copy(firstVertex, vertex);\n } else if (ix == 0 || (iy != 0 && iy !== heightSegments)) {\n const rr = radius + (Math.random() - 0.5) * 2 * randomness * radius;\n\n // vertex\n vertex[0] = -rr * Math.cos(u * Math.PI * 2) * Math.sin(v * Math.PI);\n vertex[1] = rr * Math.cos(v * Math.PI);\n vertex[2] = rr * Math.sin(u * Math.PI * 2) * Math.sin(v * Math.PI);\n\n if (ix == 0) {\n vec3.copy(vertex, firstVertex);\n }\n }\n\n vertices.push(...vertex);\n\n // normal\n vec3.copy(vertex, normal);\n vec3.normalize(normal, normal);\n vertices.push(...normal);\n\n // uv\n vertices.push(u + uOffset, 1 - v);\n verticesRow.push(index++);\n }\n\n grid.push(verticesRow);\n }\n\n // indices\n for (let iy = 0; iy < heightSegments; iy++) {\n for (let ix = 0; ix < widthSegments; ix++) {\n const a = grid[iy][ix + 1];\n const b = grid[iy][ix];\n const c = grid[iy + 1][ix];\n const d = grid[iy + 1][ix + 1];\n\n if (iy !== 0) indices.push(a, b, d);\n if (iy !== heightSegments - 1) indices.push(b, c, d);\n }\n }\n\n return {\n vertices: new Float32Array(vertices),\n indices: new Uint16Array(indices),\n };\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/432.0c104140e4e6aed7.js b/_next/static/chunks/432.0c104140e4e6aed7.js deleted file mode 100644 index a1af1192..00000000 --- a/_next/static/chunks/432.0c104140e4e6aed7.js +++ 
/dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[432],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),c=t(9147),u=t.n(c);t(7319);let m=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),l=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),d=(0,o.useRouter)(),f=d.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,h]=(0,s.useState)(null),[x,b]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(f?b(f[1]):b(a[0].name),m&&c.current)for(c.current.appendChild(m.domElement);m.__controllers.length>0;)m.__controllers[0].remove();p&&l.current&&(p.dom.style.position="absolute",p.showPanel(1),l.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:m,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){b(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},l=e=>(0,r.jsx)(m,{...e});function p(e,n){if(!e)throw Error(n)}},4655:function(e,n,t){"use strict";t.d(n,{Ax:function(){return i},MO:function(){return o},O$:function(){return r},v8:function(){return a},zS:function(){return s}});let r=40,a=0,i=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},1432:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return l}});var r=t(6416),a=t(5671),i=t(4655),o=t(3569),s="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_cube;\n\n@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n // Our camera and the skybox cube are both centered at (0, 0, 0)\n // so we can use the cube geomtry position to get viewing vector to sample the cube texture.\n // The magnitude of the vector doesn't matter.\n var cubemapVec = fragPosition.xyz - vec3(0.5);\n return textureSample(myTexture, mySampler, cubemapVec);\n}\n",c="src/sample/cubemap/main.ts";let u=async e=>{let n,{canvas:t,pageState:a}=e,c=await navigator.gpu.requestAdapter(),u=await c.requestDevice();if(!a.active)return;let m=t.getContext("webgpu"),l=window.devicePixelRatio;t.width=t.clientWidth*l,t.height=t.clientHeight*l;let p=navigator.gpu.getPreferredCanvasFormat();m.configure({device:u,format:p,alphaMode:"premultiplied"});let d=u.createBuffer({size:i.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(d.getMappedRange()).set(i.zS),d.unmap();let f=u.createRenderPipeline({layout:"auto",vertex:{module:u.createShaderModule({code:o.Z}),entryPoint:"main",buffers:[{arrayStride:i.O$,attributes:[{shaderLocation:0,offset:i.v8,format:"float32x4"},{shaderLocation:1,offset:i.Ax,format:"float32x2"}]}]},fragment:{module:u.createShaderModule({code:s}),entryPoint:"main",targets:[{format:p}]},primitive:{topology:"triangle-list",cullMode:"none"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),g=u.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT});{let 
h=["/assets/img/cubemap/posx.jpg","/assets/img/cubemap/negx.jpg","/assets/img/cubemap/posy.jpg","/assets/img/cubemap/negy.jpg","/assets/img/cubemap/posz.jpg","/assets/img/cubemap/negz.jpg"].map(async e=>{let n=await fetch(e);return createImageBitmap(await n.blob())}),x=await Promise.all(h);n=u.createTexture({dimension:"2d",size:[x[0].width,x[0].height,6],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});for(let b=0;b(0,a.Tl)({name:"Cubemap",description:"This example shows how to render and sample from a cubemap texture.",init:u,sources:[{name:c.substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport sampleCubemapWGSL from './sampleCubemap.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleCubemapWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Since we are seeing from inside of the cube\n // and we are using the regular cube geomtry data with outward-facing normals,\n // the cullMode should be 'front' or 'none'.\n cullMode: 'none',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n // Fetch the 6 separate images for negative/positive x, y, z axis of a cubemap\n // and upload it into a GPUTexture.\n let cubemapTexture: GPUTexture;\n {\n // The order of the array layers is [+X, -X, +Y, -Y, +Z, -Z]\n const imgSrcs = [\n '/assets/img/cubemap/posx.jpg',\n '/assets/img/cubemap/negx.jpg',\n '/assets/img/cubemap/posy.jpg',\n '/assets/img/cubemap/negy.jpg',\n '/assets/img/cubemap/posz.jpg',\n 
'/assets/img/cubemap/negz.jpg',\n ];\n const promises = imgSrcs.map(async (src) => {\n const response = await fetch(src);\n return createImageBitmap(await response.blob());\n });\n const imageBitmaps = await Promise.all(promises);\n\n cubemapTexture = device.createTexture({\n dimension: '2d',\n // Create a 2d array texture.\n // Assume each image has the same size.\n size: [imageBitmaps[0].width, imageBitmaps[0].height, 6],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n for (let i = 0; i < imageBitmaps.length; i++) {\n const imageBitmap = imageBitmaps[i];\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubemapTexture, origin: [0, 0, i] },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n }\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n offset: 0,\n size: uniformBufferSize,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubemapTexture.createView({\n dimension: 'cube',\n }),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 3000);\n\n const modelMatrix = mat4.scaling(vec3.fromValues(1000, 1000, 1000));\n const modelViewProjectionMatrix = mat4.create() as Float32Array;\n const viewMatrix = mat4.identity();\n\n const tmpMat4 = mat4.create();\n\n // Comppute camera movement:\n // It rotates around Y axis with a slight pitch movement.\n function updateTransformationMatrix() {\n const now = Date.now() / 800;\n\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(1, 0, 0),\n (Math.PI / 10) * Math.sin(now),\n tmpMat4\n );\n mat4.rotate(tmpMat4, vec3.fromValues(0, 1, 0), now * 0.2, tmpMat4);\n\n mat4.multiply(tmpMat4, modelMatrix, modelViewProjectionMatrix);\n mat4.multiply(\n projectionMatrix,\n modelViewProjectionMatrix,\n modelViewProjectionMatrix\n );\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n updateTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n modelViewProjectionMatrix.buffer,\n modelViewProjectionMatrix.byteOffset,\n modelViewProjectionMatrix.byteLength\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst CubemapCubes: () => JSX.Element 
= () =>\n makeSample({\n name: 'Cubemap',\n description:\n 'This example shows how to render and sample from a cubemap texture.',\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n contents: basicVertWGSL,\n editable: true,\n },\n {\n name: './sampleCubemap.frag.wgsl',\n contents: sampleCubemapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default CubemapCubes;\n"},{name:"../../shaders/basic.vert.wgsl",contents:o.Z,editable:!0},{name:"./sampleCubemap.frag.wgsl",contents:s,editable:!0},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:c});var l=m},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/432.b708dc8c2555b6f7.js b/_next/static/chunks/432.b708dc8c2555b6f7.js new file mode 100644 index 00000000..231b1ff1 --- /dev/null +++ b/_next/static/chunks/432.b708dc8c2555b6f7.js @@ -0,0 +1 @@ 
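// The cubemap-upload pattern from the sample, condensed into one hedged
// sketch: decode the six face images in parallel, then copy each into one
// array layer of a single 6-layer texture. The layer order [+X, -X, +Y, -Y,
// +Z, -Z] is what a 'cube' view expects. `device` and the URLs are assumed.
async function loadCubemap(
  device: GPUDevice,
  faceUrls: string[] // six URLs, in [+X, -X, +Y, -Y, +Z, -Z] order
): Promise<GPUTexture> {
  const faces = await Promise.all(
    faceUrls.map(async (url) => createImageBitmap(await (await fetch(url)).blob()))
  );
  const texture = device.createTexture({
    dimension: '2d', // a plain 2d texture with 6 layers, viewed as a cube later
    size: [faces[0].width, faces[0].height, 6],
    format: 'rgba8unorm',
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST |
      GPUTextureUsage.RENDER_ATTACHMENT, // required by copyExternalImageToTexture
  });
  faces.forEach((face, layer) => {
    device.queue.copyExternalImageToTexture(
      { source: face },
      { texture, origin: [0, 0, layer] }, // z selects the destination array layer
      [face.width, face.height]
    );
  });
  return texture;
}
// Bind texture.createView({ dimension: 'cube' }) and sample by direction; the
// sample's WGSL derives that direction as fragPosition.xyz - 0.5, and its
// magnitude does not matter.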
+(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[432],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return l},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),c=t(9147),u=t.n(c);t(7319);let m=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),l=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),d=(0,o.useRouter)(),f=d.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,h]=(0,s.useState)(null),[x,b]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(f?b(f[1]):b(a[0].name),m&&c.current)for(c.current.appendChild(m.domElement);m.__controllers.length>0;)m.__controllers[0].remove();p&&l.current&&(p.dom.style.position="absolute",p.showPanel(1),l.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:m,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:l}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){b(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},l=e=>(0,r.jsx)(m,{...e});function p(e,n){if(!e)throw Error(n)}},4655:function(e,n,t){"use strict";t.d(n,{Ax:function(){return i},MO:function(){return o},O$:function(){return r},v8:function(){return a},zS:function(){return s}});let r=40,a=0,i=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},1432:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return l}});var r=t(6416),a=t(5671),i=t(4655),o=t(3569),s="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_cube;\n\n@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n // Our camera and the skybox cube are both centered at (0, 0, 0)\n // so we can use the cube geomtry position to get viewing vector to sample the cube texture.\n // The magnitude of the vector doesn't matter.\n var cubemapVec = fragPosition.xyz - vec3(0.5);\n return textureSample(myTexture, mySampler, cubemapVec);\n}\n",c="src/sample/cubemap/main.ts";let u=async e=>{let n,{canvas:t,pageState:a}=e,c=await navigator.gpu.requestAdapter(),u=await c.requestDevice();if(!a.active)return;let m=t.getContext("webgpu"),l=window.devicePixelRatio;t.width=t.clientWidth*l,t.height=t.clientHeight*l;let p=navigator.gpu.getPreferredCanvasFormat();m.configure({device:u,format:p,alphaMode:"premultiplied"});let d=u.createBuffer({size:i.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(d.getMappedRange()).set(i.zS),d.unmap();let f=u.createRenderPipeline({layout:"auto",vertex:{module:u.createShaderModule({code:o.Z}),entryPoint:"main",buffers:[{arrayStride:i.O$,attributes:[{shaderLocation:0,offset:i.v8,format:"float32x4"},{shaderLocation:1,offset:i.Ax,format:"float32x2"}]}]},fragment:{module:u.createShaderModule({code:s}),entryPoint:"main",targets:[{format:p}]},primitive:{topology:"triangle-list",cullMode:"none"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),g=u.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT});{let 
h=["../assets/img/cubemap/posx.jpg","../assets/img/cubemap/negx.jpg","../assets/img/cubemap/posy.jpg","../assets/img/cubemap/negy.jpg","../assets/img/cubemap/posz.jpg","../assets/img/cubemap/negz.jpg"].map(async e=>{let n=await fetch(e);return createImageBitmap(await n.blob())}),x=await Promise.all(h);n=u.createTexture({dimension:"2d",size:[x[0].width,x[0].height,6],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});for(let b=0;b(0,a.Tl)({name:"Cubemap",description:"This example shows how to render and sample from a cubemap texture.",init:u,sources:[{name:c.substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport sampleCubemapWGSL from './sampleCubemap.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleCubemapWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Since we are seeing from inside of the cube\n // and we are using the regular cube geomtry data with outward-facing normals,\n // the cullMode should be 'front' or 'none'.\n cullMode: 'none',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n // Fetch the 6 separate images for negative/positive x, y, z axis of a cubemap\n // and upload it into a GPUTexture.\n let cubemapTexture: GPUTexture;\n {\n // The order of the array layers is [+X, -X, +Y, -Y, +Z, -Z]\n const imgSrcs = [\n '../assets/img/cubemap/posx.jpg',\n '../assets/img/cubemap/negx.jpg',\n '../assets/img/cubemap/posy.jpg',\n '../assets/img/cubemap/negy.jpg',\n 
'../assets/img/cubemap/posz.jpg',\n '../assets/img/cubemap/negz.jpg',\n ];\n const promises = imgSrcs.map(async (src) => {\n const response = await fetch(src);\n return createImageBitmap(await response.blob());\n });\n const imageBitmaps = await Promise.all(promises);\n\n cubemapTexture = device.createTexture({\n dimension: '2d',\n // Create a 2d array texture.\n // Assume each image has the same size.\n size: [imageBitmaps[0].width, imageBitmaps[0].height, 6],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n for (let i = 0; i < imageBitmaps.length; i++) {\n const imageBitmap = imageBitmaps[i];\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubemapTexture, origin: [0, 0, i] },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n }\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n offset: 0,\n size: uniformBufferSize,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubemapTexture.createView({\n dimension: 'cube',\n }),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective((2 * Math.PI) / 5, aspect, 1, 3000);\n\n const modelMatrix = mat4.scaling(vec3.fromValues(1000, 1000, 1000));\n const modelViewProjectionMatrix = mat4.create() as Float32Array;\n const viewMatrix = mat4.identity();\n\n const tmpMat4 = mat4.create();\n\n // Comppute camera movement:\n // It rotates around Y axis with a slight pitch movement.\n function updateTransformationMatrix() {\n const now = Date.now() / 800;\n\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(1, 0, 0),\n (Math.PI / 10) * Math.sin(now),\n tmpMat4\n );\n mat4.rotate(tmpMat4, vec3.fromValues(0, 1, 0), now * 0.2, tmpMat4);\n\n mat4.multiply(tmpMat4, modelMatrix, modelViewProjectionMatrix);\n mat4.multiply(\n projectionMatrix,\n modelViewProjectionMatrix,\n modelViewProjectionMatrix\n );\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n updateTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n modelViewProjectionMatrix.buffer,\n modelViewProjectionMatrix.byteOffset,\n modelViewProjectionMatrix.byteLength\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n 
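// A worked sketch of the camera motion the comment above describes (a steady
// rotation around the Y axis plus a slight pitch oscillation), using the same
// wgpu-matrix calls and constants as the sample; the function name and `out`
// parameter are illustrative.
import { mat4, vec3 } from 'wgpu-matrix';

function orbitViewMatrix(out: Float32Array): Float32Array {
  const now = Date.now() / 800;
  // Pitch: oscillates within +/- PI/10 radians (18 degrees) over time.
  mat4.rotate(
    mat4.identity(),
    vec3.fromValues(1, 0, 0),
    (Math.PI / 10) * Math.sin(now),
    out
  );
  // Yaw: constant-speed rotation about the Y axis.
  mat4.rotate(out, vec3.fromValues(0, 1, 0), now * 0.2, out);
  return out;
}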
requestAnimationFrame(frame);\n};\n\nconst CubemapCubes: () => JSX.Element = () =>\n makeSample({\n name: 'Cubemap',\n description:\n 'This example shows how to render and sample from a cubemap texture.',\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n contents: basicVertWGSL,\n editable: true,\n },\n {\n name: './sampleCubemap.frag.wgsl',\n contents: sampleCubemapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default CubemapCubes;\n"},{name:"../../shaders/basic.vert.wgsl",contents:o.Z,editable:!0},{name:"./sampleCubemap.frag.wgsl",contents:s,editable:!0},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:c});var l=m},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/613.a0b21871f0f1166d.js b/_next/static/chunks/613.a0b21871f0f1166d.js new file mode 100644 index 00000000..111d2c30 --- 
/dev/null +++ b/_next/static/chunks/613.a0b21871f0f1166d.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[613],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return m},hu:function(){return f}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),c=t(9147),u=t.n(c);t(7319);let l=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),l=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),m=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),d=(0,o.useRouter)(),p=d.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,h]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(p?v(p[1]):v(a[0].name),l&&c.current)for(c.current.appendChild(l.domElement);l.__controllers.length>0;)l.__controllers[0].remove();f&&m.current&&(f.dom.style.position="absolute",f.showPanel(1),m.current.appendChild(f.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:l,stats:f});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},m=e=>(0,r.jsx)(l,{...e});function f(e,n){if(!e)throw Error(n)}},4655:function(e,n,t){"use strict";t.d(n,{Ax:function(){return i},MO:function(){return o},O$:function(){return r},v8:function(){return a},zS:function(){return s}});let r=40,a=0,i=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},613:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return m}});var r=t(6416),a=t(5671),i=t(4655),o=t(3569),s="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_2d;\n\n@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n return textureSample(myTexture, mySampler, fragUV) * fragPosition;\n}\n",c="src/sample/texturedCube/main.ts";let u=async e=>{let n,{canvas:t,pageState:a}=e,c=await navigator.gpu.requestAdapter(),u=await c.requestDevice();if(!a.active)return;let l=t.getContext("webgpu"),m=window.devicePixelRatio;t.width=t.clientWidth*m,t.height=t.clientHeight*m;let f=navigator.gpu.getPreferredCanvasFormat();l.configure({device:u,format:f,alphaMode:"premultiplied"});let d=u.createBuffer({size:i.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(d.getMappedRange()).set(i.zS),d.unmap();let p=u.createRenderPipeline({layout:"auto",vertex:{module:u.createShaderModule({code:o.Z}),entryPoint:"main",buffers:[{arrayStride:i.O$,attributes:[{shaderLocation:0,offset:i.v8,format:"float32x4"},{shaderLocation:1,offset:i.Ax,format:"float32x2"}]}]},fragment:{module:u.createShaderModule({code:s}),entryPoint:"main",targets:[{format:f}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),g=u.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),h=u.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let x=await fetch("../assets/img/Di-3d.png"),v=await createImageBitmap(await 
x.blob());n=u.createTexture({size:[v.width,v.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),u.queue.copyExternalImageToTexture({source:v},{texture:n},[v.width,v.height])}let b=u.createSampler({magFilter:"linear",minFilter:"linear"}),w=u.createBindGroup({layout:p.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:h}},{binding:1,resource:b},{binding:2,resource:n.createView()}]}),T={colorAttachments:[{view:void 0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:g.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},P=t.width/t.height,y=r._E.perspective(2*Math.PI/5,P,1,100),C=r._E.create();requestAnimationFrame(function e(){if(!a.active)return;let n=function(){let e=r._E.identity();r._E.translate(e,r.R3.fromValues(0,0,-4),e);let n=Date.now()/1e3;return r._E.rotate(e,r.R3.fromValues(Math.sin(n),Math.cos(n),0),1,e),r._E.multiply(y,e,C),C}();u.queue.writeBuffer(h,0,n.buffer,n.byteOffset,n.byteLength),T.colorAttachments[0].view=l.getCurrentTexture().createView();let t=u.createCommandEncoder(),o=t.beginRenderPass(T);o.setPipeline(p),o.setBindGroup(0,w),o.setVertexBuffer(0,d),o.draw(i.MO),o.end(),u.queue.submit([t.finish()]),requestAnimationFrame(e)})},l=()=>(0,a.Tl)({name:"Textured Cube",description:"This example shows how to bind and sample textures.",init:u,sources:[{name:c.substring(24),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport sampleTextureMixColorWGSL from './sampleTextureMixColor.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleTextureMixColorWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the cube is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n 
// pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let cubeTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n cubeTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubeTexture.createView(),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(Math.sin(now), Math.cos(now), 0),\n 1,\n viewMatrix\n );\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst TexturedCube: () => 
JSX.Element = () =>\n makeSample({\n name: 'Textured Cube',\n description: 'This example shows how to bind and sample textures.',\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n contents: basicVertWGSL,\n editable: true,\n },\n {\n name: './sampleTextureMixColor.frag.wgsl',\n contents: sampleTextureMixColorWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default TexturedCube;\n"},{name:"../../shaders/basic.vert.wgsl",contents:o.Z,editable:!0},{name:"./sampleTextureMixColor.frag.wgsl",contents:s,editable:!0},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:c});var m=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/613.fefb0c175c2d45b2.js b/_next/static/chunks/613.fefb0c175c2d45b2.js deleted file mode 100644 index c095a008..00000000 --- a/_next/static/chunks/613.fefb0c175c2d45b2.js 
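// The single-image upload path used by the Textured Cube sample, as one
// hedged helper: fetch -> Blob -> ImageBitmap keeps image decoding off the
// main thread, and copyExternalImageToTexture blits the result into a
// sampleable texture. `device` and the URL are assumed/illustrative.
async function loadTexture2D(device: GPUDevice, url: string): Promise<GPUTexture> {
  const bitmap = await createImageBitmap(await (await fetch(url)).blob());
  const texture = device.createTexture({
    size: [bitmap.width, bitmap.height, 1],
    format: 'rgba8unorm',
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST |
      GPUTextureUsage.RENDER_ATTACHMENT, // the browser's copy path needs this
  });
  device.queue.copyExternalImageToTexture(
    { source: bitmap },
    { texture },
    [bitmap.width, bitmap.height]
  );
  return texture;
}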
+++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[613],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return m},hu:function(){return f}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),c=t(9147),u=t.n(c);t(7319);let l=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),l=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),m=(0,s.useRef)(null),f=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),d=(0,o.useRouter)(),p=d.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,h]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(p?v(p[1]):v(a[0].name),l&&c.current)for(c.current.appendChild(l.domElement);l.__controllers.length>0;)l.__controllers[0].remove();f&&m.current&&(f.dom.style.position="absolute",f.showPanel(1),m.current.appendChild(f.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:l,stats:f});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(s){console.error(s),h(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},m=e=>(0,r.jsx)(l,{...e});function f(e,n){if(!e)throw Error(n)}},4655:function(e,n,t){"use strict";t.d(n,{Ax:function(){return i},MO:function(){return o},O$:function(){return r},v8:function(){return a},zS:function(){return s}});let r=40,a=0,i=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},613:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return m}});var r=t(6416),a=t(5671),i=t(4655),o=t(3569),s="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_2d;\n\n@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n return textureSample(myTexture, mySampler, fragUV) * fragPosition;\n}\n",c="src/sample/texturedCube/main.ts";let u=async e=>{let n,{canvas:t,pageState:a}=e,c=await navigator.gpu.requestAdapter(),u=await c.requestDevice();if(!a.active)return;let l=t.getContext("webgpu"),m=window.devicePixelRatio;t.width=t.clientWidth*m,t.height=t.clientHeight*m;let f=navigator.gpu.getPreferredCanvasFormat();l.configure({device:u,format:f,alphaMode:"premultiplied"});let d=u.createBuffer({size:i.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(d.getMappedRange()).set(i.zS),d.unmap();let p=u.createRenderPipeline({layout:"auto",vertex:{module:u.createShaderModule({code:o.Z}),entryPoint:"main",buffers:[{arrayStride:i.O$,attributes:[{shaderLocation:0,offset:i.v8,format:"float32x4"},{shaderLocation:1,offset:i.Ax,format:"float32x2"}]}]},fragment:{module:u.createShaderModule({code:s}),entryPoint:"main",targets:[{format:f}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),g=u.createTexture({size:[t.width,t.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),h=u.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let x=await fetch("/assets/img/Di-3d.png"),v=await createImageBitmap(await 
x.blob());n=u.createTexture({size:[v.width,v.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),u.queue.copyExternalImageToTexture({source:v},{texture:n},[v.width,v.height])}let b=u.createSampler({magFilter:"linear",minFilter:"linear"}),w=u.createBindGroup({layout:p.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:h}},{binding:1,resource:b},{binding:2,resource:n.createView()}]}),T={colorAttachments:[{view:void 0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:g.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},P=t.width/t.height,y=r._E.perspective(2*Math.PI/5,P,1,100),C=r._E.create();requestAnimationFrame(function e(){if(!a.active)return;let n=function(){let e=r._E.identity();r._E.translate(e,r.R3.fromValues(0,0,-4),e);let n=Date.now()/1e3;return r._E.rotate(e,r.R3.fromValues(Math.sin(n),Math.cos(n),0),1,e),r._E.multiply(y,e,C),C}();u.queue.writeBuffer(h,0,n.buffer,n.byteOffset,n.byteLength),T.colorAttachments[0].view=l.getCurrentTexture().createView();let t=u.createCommandEncoder(),o=t.beginRenderPass(T);o.setPipeline(p),o.setBindGroup(0,w),o.setVertexBuffer(0,d),o.draw(i.MO),o.end(),u.queue.submit([t.finish()]),requestAnimationFrame(e)})},l=()=>(0,a.Tl)({name:"Textured Cube",description:"This example shows how to bind and sample textures.",init:u,sources:[{name:c.substring(24),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport sampleTextureMixColorWGSL from './sampleTextureMixColor.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleTextureMixColorWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the cube is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n 
// pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let cubeTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n cubeTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubeTexture.createView(),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(Math.sin(now), Math.cos(now), 0),\n 1,\n viewMatrix\n );\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst TexturedCube: () => 
JSX.Element = () =>\n makeSample({\n name: 'Textured Cube',\n description: 'This example shows how to bind and sample textures.',\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n contents: basicVertWGSL,\n editable: true,\n },\n {\n name: './sampleTextureMixColor.frag.wgsl',\n contents: sampleTextureMixColorWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default TexturedCube;\n"},{name:"../../shaders/basic.vert.wgsl",contents:o.Z,editable:!0},{name:"./sampleTextureMixColor.frag.wgsl",contents:s,editable:!0},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:c});var m=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4<f32>,\n}\n@binding(0) @group(0) var<uniform> uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4<f32>,\n @location(0) fragUV : vec2<f32>,\n @location(1) fragPosition: vec4<f32>,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4<f32>,\n @location(1) uv : vec2<f32>\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/677.341d6775960cfe08.js b/_next/static/chunks/677.341d6775960cfe08.js new file mode 100644 index 00000000..87539da9 --- /dev/null +++ 
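The textured-cube chunk above carries its whole TypeScript source inline, but the texture-upload step is easy to lose in the minified noise. A minimal sketch of that path, under the same assumptions the sample makes (an rgba8unorm-compatible image and an already-acquired GPUDevice); the names loadTexture and url are illustrative, not part of the deployed chunk:

// Sketch: fetch an image and copy it into a GPUTexture, as the sample does.
async function loadTexture(device: GPUDevice, url: string): Promise<GPUTexture> {
  const response = await fetch(url);
  const imageBitmap = await createImageBitmap(await response.blob());
  const texture = device.createTexture({
    size: [imageBitmap.width, imageBitmap.height, 1],
    format: 'rgba8unorm',
    // copyExternalImageToTexture requires COPY_DST and RENDER_ATTACHMENT;
    // TEXTURE_BINDING lets the fragment shader sample the result.
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST |
      GPUTextureUsage.RENDER_ATTACHMENT,
  });
  device.queue.copyExternalImageToTexture(
    { source: imageBitmap },
    { texture },
    [imageBitmap.width, imageBitmap.height]
  );
  return texture;
}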
b/_next/static/chunks/677.341d6775960cfe08.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[677],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),v=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,h]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(v?h(v[1]):h(a[0].name),c&&l.current)for(l.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();m&&d.current&&(m.dom.style.position="absolute",m.showPanel(1),d.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:m});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){h(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function m(e,n){if(!e)throw Error(n)}},6677:function(e,n,t){"use strict";var r="src/sample/videoUploading/main.ts";t.r(n);var a=t(5671),i=t(134),o=t(7618);let s=async e=>{let{canvas:n,pageState:t,gui:r}=e,a=document.createElement("video");a.loop=!0,a.autoplay=!0,a.muted=!0,a.src="../assets/video/pano.webm",await a.play();let s=await navigator.gpu.requestAdapter(),l=await s.requestDevice();if(!t.active)return;let u=n.getContext("webgpu"),c=window.devicePixelRatio;n.width=n.clientWidth*c,n.height=n.clientHeight*c;let d=navigator.gpu.getPreferredCanvasFormat();u.configure({device:l,format:d,alphaMode:"premultiplied"});let m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i.Z}),entryPoint:"vert_main"},fragment:{module:l.createShaderModule({code:o.Z}),entryPoint:"main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),p=l.createSampler({magFilter:"linear",minFilter:"linear"}),v={requestFrame:"requestAnimationFrame"};function g(){if(!t.active)return;let e=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:1,resource:p},{binding:2,resource:l.importExternalTexture({source:a})}]}),n=l.createCommandEncoder(),r=u.getCurrentTexture().createView(),i=n.beginRenderPass({colorAttachments:[{view:r,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});i.setPipeline(m),i.setBindGroup(0,e),i.draw(6),i.end(),l.queue.submit([n.finish()]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)}r.add(v,"requestFrame",["requestAnimationFrame","requestVideoFrameCallback"]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)},l=()=>(0,a.Tl)({name:"Video Uploading",description:"This example shows how to upload video frames to WebGPU.",gui:!0,init:s,sources:[{name:r.substring(26),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\nimport sampleExternalTextureWGSL from '../../shaders/sampleExternalTexture.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n // Set video element\n const video = document.createElement('video');\n video.loop = true;\n video.autoplay = true;\n video.muted = true;\n video.src = '../assets/video/pano.webm';\n await video.play();\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n 
format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleExternalTextureWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const settings = {\n requestFrame: 'requestAnimationFrame',\n };\n\n gui.add(settings, 'requestFrame', [\n 'requestAnimationFrame',\n 'requestVideoFrameCallback',\n ]);\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: device.importExternalTexture({\n source: video,\n }),\n },\n ],\n });\n\n const commandEncoder = device.createCommandEncoder();\n const textureView = context.getCurrentTexture().createView();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: textureView,\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n }\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n};\n\nconst VideoUploading: () => JSX.Element = () =>\n makeSample({\n name: 'Video Uploading',\n description: 'This example shows how to upload video frames to WebGPU.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n {\n name: '../../shaders/sampleExternalTexture.wgsl',\n contents: sampleExternalTextureWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default VideoUploading;\n"},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:i.Z,editable:!0},{name:"../../shaders/sampleExternalTexture.wgsl",contents:o.Z,editable:!0}],filename:r});n.default=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4<f32>,\n @location(0) fragUV : vec2<f32>,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 
1.0),\n vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"},7618:function(e,n){"use strict";n.Z="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_external;\n\n@fragment\nfn main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/677.956018e927779b1e.js b/_next/static/chunks/677.956018e927779b1e.js deleted file mode 100644 index f76963e8..00000000 --- a/_next/static/chunks/677.956018e927779b1e.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[677],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return m}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),m=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),p=(0,o.useRouter)(),v=p.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,h]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(v?h(v[1]):h(a[0].name),c&&l.current)for(l.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();m&&d.current&&(m.dom.style.position="absolute",m.showPanel(1),d.current.appendChild(m.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:m});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){h(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function m(e,n){if(!e)throw Error(n)}},6677:function(e,n,t){"use strict";var r="src/sample/videoUploading/main.ts";t.r(n);var a=t(5671),i=t(134),o=t(7618);let s=async e=>{let{canvas:n,pageState:t,gui:r}=e,a=document.createElement("video");a.loop=!0,a.autoplay=!0,a.muted=!0,a.src="/assets/video/pano.webm",await a.play();let s=await navigator.gpu.requestAdapter(),l=await s.requestDevice();if(!t.active)return;let u=n.getContext("webgpu"),c=window.devicePixelRatio;n.width=n.clientWidth*c,n.height=n.clientHeight*c;let d=navigator.gpu.getPreferredCanvasFormat();u.configure({device:l,format:d,alphaMode:"premultiplied"});let m=l.createRenderPipeline({layout:"auto",vertex:{module:l.createShaderModule({code:i.Z}),entryPoint:"vert_main"},fragment:{module:l.createShaderModule({code:o.Z}),entryPoint:"main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),p=l.createSampler({magFilter:"linear",minFilter:"linear"}),v={requestFrame:"requestAnimationFrame"};function g(){if(!t.active)return;let e=l.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:1,resource:p},{binding:2,resource:l.importExternalTexture({source:a})}]}),n=l.createCommandEncoder(),r=u.getCurrentTexture().createView(),i=n.beginRenderPass({colorAttachments:[{view:r,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});i.setPipeline(m),i.setBindGroup(0,e),i.draw(6),i.end(),l.queue.submit([n.finish()]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)}r.add(v,"requestFrame",["requestAnimationFrame","requestVideoFrameCallback"]),"requestVideoFrameCallback"==v.requestFrame?a.requestVideoFrameCallback(g):requestAnimationFrame(g)},l=()=>(0,a.Tl)({name:"Video Uploading",description:"This example shows how to upload video frames to WebGPU.",gui:!0,init:s,sources:[{name:r.substring(26),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\nimport sampleExternalTextureWGSL from '../../shaders/sampleExternalTexture.frag.wgsl';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n // Set video element\n const video = document.createElement('video');\n video.loop = true;\n video.autoplay = true;\n video.muted = true;\n video.src = '/assets/video/pano.webm';\n await video.play();\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: 
presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: sampleExternalTextureWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const settings = {\n requestFrame: 'requestAnimationFrame',\n };\n\n gui.add(settings, 'requestFrame', [\n 'requestAnimationFrame',\n 'requestVideoFrameCallback',\n ]);\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: device.importExternalTexture({\n source: video,\n }),\n },\n ],\n });\n\n const commandEncoder = device.createCommandEncoder();\n const textureView = context.getCurrentTexture().createView();\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: textureView,\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n };\n\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n }\n\n if (settings.requestFrame == 'requestVideoFrameCallback') {\n video.requestVideoFrameCallback(frame);\n } else {\n requestAnimationFrame(frame);\n }\n};\n\nconst VideoUploading: () => JSX.Element = () =>\n makeSample({\n name: 'Video Uploading',\n description: 'This example shows how to upload video frames to WebGPU.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n {\n name: '../../shaders/sampleExternalTexture.wgsl',\n contents: sampleExternalTextureWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default VideoUploading;\n"},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:i.Z,editable:!0},{name:"../../shaders/sampleExternalTexture.wgsl",contents:o.Z,editable:!0}],filename:r});n.default=l},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4<f32>,\n @location(0) fragUV : vec2<f32>,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 1.0),\n 
vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"},7618:function(e,n){"use strict";n.Z="@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_external;\n\n@fragment\nfn main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/770.3da42912bb9098b5.js b/_next/static/chunks/770.3da42912bb9098b5.js deleted file mode 100644 index 187adaba..00000000 --- a/_next/static/chunks/770.3da42912bb9098b5.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[770],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),u=t(7294),s=t(9147),c=t.n(s);t(7319);let l=e=>{let n=(0,u.useRef)(null),i=(0,u.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),s=(0,u.useRef)(null),l=(0,u.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,u.useRef)(null),p=(0,u.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),g=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,h]=(0,u.useState)(null),[v,b]=(0,u.useState)(null);return(0,u.useEffect)(()=>{if(g?b(g[1]):b(i[0].name),l&&s.current)for(s.current.appendChild(l.domElement);l.__controllers.length>0;)l.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:l,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(u){console.error(u),h(u)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),f?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
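Both builds of the video chunk, the deleted absolute-path one and its relative-path replacement, hinge on one rule: a texture imported with importExternalTexture is only valid for the frame in which it was imported, so the bind group referencing it is rebuilt every frame. A condensed sketch of that loop, assuming the device, pipeline, sampler, context, and playing HTMLVideoElement from the sample; it uses feature detection here where the sample uses a GUI toggle:

// Sketch: re-import the video into a GPUExternalTexture on every frame.
function makeVideoFrameLoop(
  device: GPUDevice,
  pipeline: GPURenderPipeline,
  sampler: GPUSampler,
  context: GPUCanvasContext,
  video: HTMLVideoElement
) {
  function frame() {
    // The external texture expires once this frame's work is submitted,
    // so the bind group that references it must be rebuilt each time.
    const bindGroup = device.createBindGroup({
      layout: pipeline.getBindGroupLayout(0),
      entries: [
        { binding: 1, resource: sampler },
        { binding: 2, resource: device.importExternalTexture({ source: video }) },
      ],
    });
    const encoder = device.createCommandEncoder();
    const pass = encoder.beginRenderPass({
      colorAttachments: [{
        view: context.getCurrentTexture().createView(),
        clearValue: { r: 0, g: 0, b: 0, a: 1 },
        loadOp: 'clear',
        storeOp: 'store',
      }],
    });
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.draw(6); // fullscreen quad: two triangles
    pass.end();
    device.queue.submit([encoder.finish()]);
    // rVFC fires once per decoded video frame; rAF is the display-rate fallback.
    if ('requestVideoFrameCallback' in video) {
      video.requestVideoFrameCallback(frame);
    } else {
      requestAnimationFrame(frame);
    }
  }
  frame();
}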
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(f)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:s}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){b(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":v==e.name},n))]})]})},d=e=>(0,r.jsx)(l,{...e});function p(e,n){if(!e)throw Error(n)}},1770:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return c}});var r=t(5671),i="struct Params {\n filterDim : i32,\n blockDim : u32,\n}\n\n@group(0) @binding(0) var samp : sampler;\n@group(0) @binding(1) var<uniform> params : Params;\n@group(1) @binding(1) var inputTex : texture_2d<f32>;\n@group(1) @binding(2) var outputTex : texture_storage_2d<rgba8unorm, write>;\n\nstruct Flip {\n value : u32,\n}\n@group(1) @binding(3) var<uniform> flip : Flip;\n\n// This shader blurs the input texture in one direction, depending on whether\n// |flip.value| is 0 or 1.\n// It does so by running (128 / 4) threads per workgroup to load 128\n// texels into 4 rows of shared memory. Each thread loads a\n// 4 x 4 block of texels to take advantage of the texture sampling\n// hardware.\n// Then, each thread computes the blur result by averaging the adjacent texel values\n// in shared memory.\n// Because we're operating on a subset of the texture, we cannot compute all of the\n// results since not all of the neighbors are available in shared memory.\n// Specifically, with 128 x 128 tiles, we can only compute and write out\n// square blocks of size 128 - (filterSize - 1). We compute the number of blocks\n// needed in JavaScript and dispatch that amount.\n\nvar<workgroup> tile : array<array<vec3<f32>, 128>, 4>;\n\n@compute @workgroup_size(32, 1, 1)\nfn main(\n @builtin(workgroup_id) WorkGroupID : vec3<u32>,\n @builtin(local_invocation_id) LocalInvocationID : vec3<u32>\n) {\n let filterOffset = (params.filterDim - 1) / 2;\n let dims = vec2<i32>(textureDimensions(inputTex, 0));\n let baseIndex = vec2<i32>(WorkGroupID.xy * vec2(params.blockDim, 4) +\n LocalInvocationID.xy * vec2(4, 1))\n - vec2(filterOffset, 0);\n\n for (var r = 0; r < 4; r++) {\n for (var c = 0; c < 4; c++) {\n var loadIndex = baseIndex + vec2(c, r);\n if (flip.value != 0u) {\n loadIndex = loadIndex.yx;\n }\n\n tile[r][4 * LocalInvocationID.x + u32(c)] = textureSampleLevel(\n inputTex,\n samp,\n (vec2<f32>(loadIndex) + vec2<f32>(0.25, 0.25)) / vec2<f32>(dims),\n 0.0\n ).rgb;\n }\n }\n\n workgroupBarrier();\n\n for (var r = 0; r < 4; r++) {\n for (var c = 0; c < 4; c++) {\n var writeIndex = baseIndex + vec2(c, r);\n if (flip.value != 0) {\n writeIndex = writeIndex.yx;\n }\n\n let center = i32(4 * LocalInvocationID.x) + c;\n if (center >= filterOffset &&\n center < 128 - filterOffset &&\n all(writeIndex < dims)) {\n var acc = vec3(0.0, 0.0, 0.0);\n for (var f = 0; f < params.filterDim; f++) {\n var i = center + f - filterOffset;\n acc = acc + (1.0 / f32(params.filterDim)) * tile[r][i];\n }\n textureStore(outputTex, writeIndex, vec4(acc, 1.0));\n }\n }\n }\n}\n",a=t(134),o="src/sample/imageBlur/main.ts";let u=async e=>{let n,{canvas:t,pageState:r,gui:o}=e,u=await navigator.gpu.requestAdapter(),s=await u.requestDevice();if(!r.active)return;let c=t.getContext("webgpu"),l=window.devicePixelRatio;t.width=t.clientWidth*l,t.height=t.clientHeight*l;let d=navigator.gpu.getPreferredCanvasFormat();c.configure({device:s,format:d,alphaMode:"premultiplied"});let p=s.createComputePipeline({layout:"auto",compute:{module:s.createShaderModule({code:i}),entryPoint:"main"}}),m=s.createRenderPipeline({layout:"auto",vertex:{module:s.createShaderModule({code:a.Z}),entryPoint:"vert_main"},fragment:{module:s.createShaderModule({code:a.Z}),entryPoint:"frag_main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),g=s.createSampler({magFilter:"linear",minFilter:"linear"}),f=await fetch("/assets/img/Di-3d.png"),h=await createImageBitmap(await f.blob()),[v,b]=[h.width,h.height],x=s.createTexture({size:[v,b,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});s.queue.copyExternalImageToTexture({source:h},{texture:x},[h.width,h.height]);let w=[0,1].map(()=>s.createTexture({size:{width:v,height:b},format:"rgba8unorm",usage:GPUTextureUsage.COPY_DST|GPUTextureUsage.STORAGE_BINDING|GPUTextureUsage.TEXTURE_BINDING})),P=(()=>{let e=s.createBuffer({size:4,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM});return new Uint32Array(e.getMappedRange())[0]=0,e.unmap(),e})(),G=(()=>{let e=s.createBuffer({size:4,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM});return new 
Uint32Array(e.getMappedRange())[0]=1,e.unmap(),e})(),B=s.createBuffer({size:8,usage:GPUBufferUsage.COPY_DST|GPUBufferUsage.UNIFORM}),y=s.createBindGroup({layout:p.getBindGroupLayout(0),entries:[{binding:0,resource:g},{binding:1,resource:{buffer:B}}]}),T=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:x.createView()},{binding:2,resource:w[0].createView()},{binding:3,resource:{buffer:P}}]}),U=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:w[0].createView()},{binding:2,resource:w[1].createView()},{binding:3,resource:{buffer:G}}]}),S=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:w[1].createView()},{binding:2,resource:w[0].createView()},{binding:3,resource:{buffer:P}}]}),_=s.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:0,resource:g},{binding:1,resource:w[1].createView()}]}),I={filterSize:15,iterations:2},C=()=>{n=128-(I.filterSize-1),s.queue.writeBuffer(B,0,new Uint32Array([I.filterSize,n]))};o.add(I,"filterSize",1,33).step(2).onChange(C),o.add(I,"iterations",1,10).step(1),C(),requestAnimationFrame(function e(){if(!r.active)return;let t=s.createCommandEncoder(),i=t.beginComputePass();i.setPipeline(p),i.setBindGroup(0,y),i.setBindGroup(1,T),i.dispatchWorkgroups(Math.ceil(v/n),Math.ceil(b/4)),i.setBindGroup(1,U),i.dispatchWorkgroups(Math.ceil(b/n),Math.ceil(v/4));for(let a=0;a<I.iterations-1;++a)i.setBindGroup(1,S),i.dispatchWorkgroups(Math.ceil(v/n),Math.ceil(b/4)),i.setBindGroup(1,U),i.dispatchWorkgroups(Math.ceil(b/n),Math.ceil(v/4));i.end();let o=t.beginRenderPass({colorAttachments:[{view:c.getCurrentTexture().createView(),clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});o.setPipeline(m),o.setBindGroup(0,_),o.draw(6),o.end(),s.queue.submit([t.finish()]),requestAnimationFrame(e)})},s=()=>(0,r.Tl)({name:"Image Blur",description:"This example shows how to blur an image using a WebGPU compute shader.",gui:!0,init:u,sources:[{name:o.substring(21),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport blurWGSL from './blur.wgsl';\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\n\n// Constants from the blur.wgsl shader.\nconst tileDim = 128;\nconst batch = [4, 4];\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const blurPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: blurWGSL,\n }),\n entryPoint: 'main',\n },\n });\n\n const fullscreenQuadPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'frag_main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const response = await fetch('/assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n const [srcWidth, srcHeight] = [imageBitmap.width, imageBitmap.height];\n const cubeTexture = device.createTexture({\n size: [srcWidth, srcHeight, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n 
GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n\n const textures = [0, 1].map(() => {\n return device.createTexture({\n size: {\n width: srcWidth,\n height: srcHeight,\n },\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.TEXTURE_BINDING,\n });\n });\n\n const buffer0 = (() => {\n const buffer = device.createBuffer({\n size: 4,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM,\n });\n new Uint32Array(buffer.getMappedRange())[0] = 0;\n buffer.unmap();\n return buffer;\n })();\n\n const buffer1 = (() => {\n const buffer = device.createBuffer({\n size: 4,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM,\n });\n new Uint32Array(buffer.getMappedRange())[0] = 1;\n buffer.unmap();\n return buffer;\n })();\n\n const blurParamsBuffer = device.createBuffer({\n size: 8,\n usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,\n });\n\n const computeConstants = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: sampler,\n },\n {\n binding: 1,\n resource: {\n buffer: blurParamsBuffer,\n },\n },\n ],\n });\n\n const computeBindGroup0 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: cubeTexture.createView(),\n },\n {\n binding: 2,\n resource: textures[0].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer0,\n },\n },\n ],\n });\n\n const computeBindGroup1 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: textures[0].createView(),\n },\n {\n binding: 2,\n resource: textures[1].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer1,\n },\n },\n ],\n });\n\n const computeBindGroup2 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: textures[1].createView(),\n },\n {\n binding: 2,\n resource: textures[0].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer0,\n },\n },\n ],\n });\n\n const showResultBindGroup = device.createBindGroup({\n layout: fullscreenQuadPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: sampler,\n },\n {\n binding: 1,\n resource: textures[1].createView(),\n },\n ],\n });\n\n const settings = {\n filterSize: 15,\n iterations: 2,\n };\n\n let blockDim: number;\n const updateSettings = () => {\n blockDim = tileDim - (settings.filterSize - 1);\n device.queue.writeBuffer(\n blurParamsBuffer,\n 0,\n new Uint32Array([settings.filterSize, blockDim])\n );\n };\n gui.add(settings, 'filterSize', 1, 33).step(2).onChange(updateSettings);\n gui.add(settings, 'iterations', 1, 10).step(1);\n\n updateSettings();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const commandEncoder = device.createCommandEncoder();\n\n const computePass = commandEncoder.beginComputePass();\n computePass.setPipeline(blurPipeline);\n computePass.setBindGroup(0, computeConstants);\n\n computePass.setBindGroup(1, computeBindGroup0);\n computePass.dispatchWorkgroups(\n Math.ceil(srcWidth / blockDim),\n Math.ceil(srcHeight / batch[1])\n );\n\n computePass.setBindGroup(1, computeBindGroup1);\n computePass.dispatchWorkgroups(\n Math.ceil(srcHeight / blockDim),\n Math.ceil(srcWidth / batch[1])\n );\n\n 
for (let i = 0; i < settings.iterations - 1; ++i) {\n computePass.setBindGroup(1, computeBindGroup2);\n computePass.dispatchWorkgroups(\n Math.ceil(srcWidth / blockDim),\n Math.ceil(srcHeight / batch[1])\n );\n\n computePass.setBindGroup(1, computeBindGroup1);\n computePass.dispatchWorkgroups(\n Math.ceil(srcHeight / blockDim),\n Math.ceil(srcWidth / batch[1])\n );\n }\n\n computePass.end();\n\n const passEncoder = commandEncoder.beginRenderPass({\n colorAttachments: [\n {\n view: context.getCurrentTexture().createView(),\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n });\n\n passEncoder.setPipeline(fullscreenQuadPipeline);\n passEncoder.setBindGroup(0, showResultBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst ImageBlur: () => JSX.Element = () =>\n makeSample({\n name: 'Image Blur',\n description:\n 'This example shows how to blur an image using a WebGPU compute shader.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './blur.wgsl',\n contents: blurWGSL,\n editable: true,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default ImageBlur;\n"},{name:"./blur.wgsl",contents:i,editable:!0},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:a.Z,editable:!0}],filename:o});var c=s},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4<f32>,\n @location(0) fragUV : vec2<f32>,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 1.0),\n vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/770.7ae9d850819591f8.js b/_next/static/chunks/770.7ae9d850819591f8.js new file mode 100644 index 00000000..7b77cae0 --- /dev/null +++ b/_next/static/chunks/770.7ae9d850819591f8.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[770],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),i=t(9008),a=t.n(i),o=t(1163),u=t(7294),s=t(9147),c=t.n(s);t(7319);let l=e=>{let n=(0,u.useRef)(null),i=(0,u.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:i}=e;return{name:n,...function(e){let n;let i=null;{i=document.createElement("div");let 
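The blur shader's comment block compresses the tile bookkeeping into a few lines, so the arithmetic is worth spelling out: with a 128-texel tile and filterSize = 15, only blockDim = 128 - (15 - 1) = 114 output texels per tile have all their neighbors in shared memory, so the JavaScript side dispatches ceil(width / blockDim) workgroups along the blur axis. A small sketch of that dispatch math; the constants mirror the sample's settings, while srcWidth and srcHeight are example values, not taken from the chunk:

// Sketch: the dispatch-count arithmetic behind the image-blur chunks.
const tileDim = 128; // texels cached per row of workgroup shared memory
const filterSize = 15; // odd filter width; the GUI steps it by 2
const blockDim = tileDim - (filterSize - 1); // 114 fully-covered outputs per tile

const srcWidth = 600; // example image size
const srcHeight = 400;
const batchHeight = 4; // each workgroup covers 4 rows of the image
const workgroupsX = Math.ceil(srcWidth / blockDim); // ceil(600 / 114) = 6
const workgroupsY = Math.ceil(srcHeight / batchHeight); // ceil(400 / 4) = 100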
a=t(4631);n=a(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){i&&t&&(t.appendChild(i),n.setOption("value",e))}})})}}}(i)}}),e.sources),s=(0,u.useRef)(null),l=(0,u.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,u.useRef)(null),p=(0,u.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),g=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,h]=(0,u.useState)(null),[v,b]=(0,u.useState)(null);return(0,u.useEffect)(()=>{if(g?b(g[1]):b(i[0].name),l&&s.current)for(s.current.appendChild(l.domElement);l.__controllers.length>0;)l.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let a=n.current;if(!a)throw Error("The canvas is not available");let o=e.init({canvas:a,pageState:t,gui:l,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),h(e)})}catch(u){console.error(u),h(u)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(a(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),f?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(f)})]}):null]}),(0,r.jsxs)("div",{className:c().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:s}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:c().sourceFileNav,children:(0,r.jsx)("ul",{children:i.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){b(e.name)},children:e.name})},n))})}),i.map((e,n)=>(0,r.jsx)(e.Container,{className:c().sourceFileContainer,"data-active":v==e.name},n))]})]})},d=e=>(0,r.jsx)(l,{...e});function p(e,n){if(!e)throw Error(n)}},1770:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return c}});var r=t(5671),i="struct Params {\n filterDim : i32,\n blockDim : u32,\n}\n\n@group(0) @binding(0) var samp : sampler;\n@group(0) @binding(1) var<uniform> params : Params;\n@group(1) @binding(1) var inputTex : texture_2d<f32>;\n@group(1) @binding(2) var outputTex : texture_storage_2d<rgba8unorm, write>;\n\nstruct Flip {\n value : u32,\n}\n@group(1) @binding(3) var<uniform> flip : Flip;\n\n// This shader blurs the input texture in one direction, depending on whether\n// |flip.value| is 0 or 1.\n// It does so by running (128 / 4) threads per workgroup to load 128\n// texels into 4 rows of shared memory. 
Each thread loads a\n// 4 x 4 block of texels to take advantage of the texture sampling\n// hardware.\n// Then, each thread computes the blur result by averaging the adjacent texel values\n// in shared memory.\n// Because we're operating on a subset of the texture, we cannot compute all of the\n// results since not all of the neighbors are available in shared memory.\n// Specifically, with 128 x 128 tiles, we can only compute and write out\n// square blocks of size 128 - (filterSize - 1). We compute the number of blocks\n// needed in JavaScript and dispatch that amount.\n\nvar<workgroup> tile : array<array<vec3<f32>, 128>, 4>;\n\n@compute @workgroup_size(32, 1, 1)\nfn main(\n @builtin(workgroup_id) WorkGroupID : vec3<u32>,\n @builtin(local_invocation_id) LocalInvocationID : vec3<u32>\n) {\n let filterOffset = (params.filterDim - 1) / 2;\n let dims = vec2<i32>(textureDimensions(inputTex, 0));\n let baseIndex = vec2<i32>(WorkGroupID.xy * vec2(params.blockDim, 4) +\n LocalInvocationID.xy * vec2(4, 1))\n - vec2(filterOffset, 0);\n\n for (var r = 0; r < 4; r++) {\n for (var c = 0; c < 4; c++) {\n var loadIndex = baseIndex + vec2(c, r);\n if (flip.value != 0u) {\n loadIndex = loadIndex.yx;\n }\n\n tile[r][4 * LocalInvocationID.x + u32(c)] = textureSampleLevel(\n inputTex,\n samp,\n (vec2<f32>(loadIndex) + vec2<f32>(0.25, 0.25)) / vec2<f32>(dims),\n 0.0\n ).rgb;\n }\n }\n\n workgroupBarrier();\n\n for (var r = 0; r < 4; r++) {\n for (var c = 0; c < 4; c++) {\n var writeIndex = baseIndex + vec2(c, r);\n if (flip.value != 0) {\n writeIndex = writeIndex.yx;\n }\n\n let center = i32(4 * LocalInvocationID.x) + c;\n if (center >= filterOffset &&\n center < 128 - filterOffset &&\n all(writeIndex < dims)) {\n var acc = vec3(0.0, 0.0, 0.0);\n for (var f = 0; f < params.filterDim; f++) {\n var i = center + f - filterOffset;\n acc = acc + (1.0 / f32(params.filterDim)) * tile[r][i];\n }\n textureStore(outputTex, writeIndex, vec4(acc, 1.0));\n }\n }\n }\n}\n",a=t(134),o="src/sample/imageBlur/main.ts";let u=async e=>{let n,{canvas:t,pageState:r,gui:o}=e,u=await navigator.gpu.requestAdapter(),s=await u.requestDevice();if(!r.active)return;let c=t.getContext("webgpu"),l=window.devicePixelRatio;t.width=t.clientWidth*l,t.height=t.clientHeight*l;let d=navigator.gpu.getPreferredCanvasFormat();c.configure({device:s,format:d,alphaMode:"premultiplied"});let p=s.createComputePipeline({layout:"auto",compute:{module:s.createShaderModule({code:i}),entryPoint:"main"}}),m=s.createRenderPipeline({layout:"auto",vertex:{module:s.createShaderModule({code:a.Z}),entryPoint:"vert_main"},fragment:{module:s.createShaderModule({code:a.Z}),entryPoint:"frag_main",targets:[{format:d}]},primitive:{topology:"triangle-list"}}),g=s.createSampler({magFilter:"linear",minFilter:"linear"}),f=await fetch("../assets/img/Di-3d.png"),h=await createImageBitmap(await f.blob()),[v,b]=[h.width,h.height],x=s.createTexture({size:[v,b,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});s.queue.copyExternalImageToTexture({source:h},{texture:x},[h.width,h.height]);let w=[0,1].map(()=>s.createTexture({size:{width:v,height:b},format:"rgba8unorm",usage:GPUTextureUsage.COPY_DST|GPUTextureUsage.STORAGE_BINDING|GPUTextureUsage.TEXTURE_BINDING})),P=(()=>{let e=s.createBuffer({size:4,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM});return new Uint32Array(e.getMappedRange())[0]=0,e.unmap(),e})(),G=(()=>{let e=s.createBuffer({size:4,mappedAtCreation:!0,usage:GPUBufferUsage.UNIFORM});return new 
Uint32Array(e.getMappedRange())[0]=1,e.unmap(),e})(),B=s.createBuffer({size:8,usage:GPUBufferUsage.COPY_DST|GPUBufferUsage.UNIFORM}),y=s.createBindGroup({layout:p.getBindGroupLayout(0),entries:[{binding:0,resource:g},{binding:1,resource:{buffer:B}}]}),T=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:x.createView()},{binding:2,resource:w[0].createView()},{binding:3,resource:{buffer:P}}]}),U=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:w[0].createView()},{binding:2,resource:w[1].createView()},{binding:3,resource:{buffer:G}}]}),S=s.createBindGroup({layout:p.getBindGroupLayout(1),entries:[{binding:1,resource:w[1].createView()},{binding:2,resource:w[0].createView()},{binding:3,resource:{buffer:P}}]}),_=s.createBindGroup({layout:m.getBindGroupLayout(0),entries:[{binding:0,resource:g},{binding:1,resource:w[1].createView()}]}),I={filterSize:15,iterations:2},C=()=>{n=128-(I.filterSize-1),s.queue.writeBuffer(B,0,new Uint32Array([I.filterSize,n]))};o.add(I,"filterSize",1,33).step(2).onChange(C),o.add(I,"iterations",1,10).step(1),C(),requestAnimationFrame(function e(){if(!r.active)return;let t=s.createCommandEncoder(),i=t.beginComputePass();i.setPipeline(p),i.setBindGroup(0,y),i.setBindGroup(1,T),i.dispatchWorkgroups(Math.ceil(v/n),Math.ceil(b/4)),i.setBindGroup(1,U),i.dispatchWorkgroups(Math.ceil(b/n),Math.ceil(v/4));for(let a=0;a<I.iterations-1;++a)i.setBindGroup(1,S),i.dispatchWorkgroups(Math.ceil(v/n),Math.ceil(b/4)),i.setBindGroup(1,U),i.dispatchWorkgroups(Math.ceil(b/n),Math.ceil(v/4));i.end();let o=t.beginRenderPass({colorAttachments:[{view:c.getCurrentTexture().createView(),clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}]});o.setPipeline(m),o.setBindGroup(0,_),o.draw(6),o.end(),s.queue.submit([t.finish()]),requestAnimationFrame(e)})},s=()=>(0,r.Tl)({name:"Image Blur",description:"This example shows how to blur an image using a WebGPU compute shader.",gui:!0,init:u,sources:[{name:o.substring(21),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nimport blurWGSL from './blur.wgsl';\nimport fullscreenTexturedQuadWGSL from '../../shaders/fullscreenTexturedQuad.wgsl';\n\n// Constants from the blur.wgsl shader.\nconst tileDim = 128;\nconst batch = [4, 4];\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n const blurPipeline = device.createComputePipeline({\n layout: 'auto',\n compute: {\n module: device.createShaderModule({\n code: blurWGSL,\n }),\n entryPoint: 'main',\n },\n });\n\n const fullscreenQuadPipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'vert_main',\n },\n fragment: {\n module: device.createShaderModule({\n code: fullscreenTexturedQuadWGSL,\n }),\n entryPoint: 'frag_main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n },\n });\n\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const response = await fetch('../assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n const [srcWidth, srcHeight] = [imageBitmap.width, imageBitmap.height];\n const cubeTexture = device.createTexture({\n size: [srcWidth, srcHeight, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n 
GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n\n const textures = [0, 1].map(() => {\n return device.createTexture({\n size: {\n width: srcWidth,\n height: srcHeight,\n },\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.STORAGE_BINDING |\n GPUTextureUsage.TEXTURE_BINDING,\n });\n });\n\n const buffer0 = (() => {\n const buffer = device.createBuffer({\n size: 4,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM,\n });\n new Uint32Array(buffer.getMappedRange())[0] = 0;\n buffer.unmap();\n return buffer;\n })();\n\n const buffer1 = (() => {\n const buffer = device.createBuffer({\n size: 4,\n mappedAtCreation: true,\n usage: GPUBufferUsage.UNIFORM,\n });\n new Uint32Array(buffer.getMappedRange())[0] = 1;\n buffer.unmap();\n return buffer;\n })();\n\n const blurParamsBuffer = device.createBuffer({\n size: 8,\n usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,\n });\n\n const computeConstants = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: sampler,\n },\n {\n binding: 1,\n resource: {\n buffer: blurParamsBuffer,\n },\n },\n ],\n });\n\n const computeBindGroup0 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: cubeTexture.createView(),\n },\n {\n binding: 2,\n resource: textures[0].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer0,\n },\n },\n ],\n });\n\n const computeBindGroup1 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: textures[0].createView(),\n },\n {\n binding: 2,\n resource: textures[1].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer1,\n },\n },\n ],\n });\n\n const computeBindGroup2 = device.createBindGroup({\n layout: blurPipeline.getBindGroupLayout(1),\n entries: [\n {\n binding: 1,\n resource: textures[1].createView(),\n },\n {\n binding: 2,\n resource: textures[0].createView(),\n },\n {\n binding: 3,\n resource: {\n buffer: buffer0,\n },\n },\n ],\n });\n\n const showResultBindGroup = device.createBindGroup({\n layout: fullscreenQuadPipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: sampler,\n },\n {\n binding: 1,\n resource: textures[1].createView(),\n },\n ],\n });\n\n const settings = {\n filterSize: 15,\n iterations: 2,\n };\n\n let blockDim: number;\n const updateSettings = () => {\n blockDim = tileDim - (settings.filterSize - 1);\n device.queue.writeBuffer(\n blurParamsBuffer,\n 0,\n new Uint32Array([settings.filterSize, blockDim])\n );\n };\n gui.add(settings, 'filterSize', 1, 33).step(2).onChange(updateSettings);\n gui.add(settings, 'iterations', 1, 10).step(1);\n\n updateSettings();\n\n function frame() {\n // Sample is no longer the active page.\n if (!pageState.active) return;\n\n const commandEncoder = device.createCommandEncoder();\n\n const computePass = commandEncoder.beginComputePass();\n computePass.setPipeline(blurPipeline);\n computePass.setBindGroup(0, computeConstants);\n\n computePass.setBindGroup(1, computeBindGroup0);\n computePass.dispatchWorkgroups(\n Math.ceil(srcWidth / blockDim),\n Math.ceil(srcHeight / batch[1])\n );\n\n computePass.setBindGroup(1, computeBindGroup1);\n computePass.dispatchWorkgroups(\n Math.ceil(srcHeight / blockDim),\n Math.ceil(srcWidth / batch[1])\n );\n\n 
for (let i = 0; i < settings.iterations - 1; ++i) {\n computePass.setBindGroup(1, computeBindGroup2);\n computePass.dispatchWorkgroups(\n Math.ceil(srcWidth / blockDim),\n Math.ceil(srcHeight / batch[1])\n );\n\n computePass.setBindGroup(1, computeBindGroup1);\n computePass.dispatchWorkgroups(\n Math.ceil(srcHeight / blockDim),\n Math.ceil(srcWidth / batch[1])\n );\n }\n\n computePass.end();\n\n const passEncoder = commandEncoder.beginRenderPass({\n colorAttachments: [\n {\n view: context.getCurrentTexture().createView(),\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n });\n\n passEncoder.setPipeline(fullscreenQuadPipeline);\n passEncoder.setBindGroup(0, showResultBindGroup);\n passEncoder.draw(6);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst ImageBlur: () => JSX.Element = () =>\n makeSample({\n name: 'Image Blur',\n description:\n 'This example shows how to blur an image using a WebGPU compute shader.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './blur.wgsl',\n contents: blurWGSL,\n editable: true,\n },\n {\n name: '../../shaders/fullscreenTexturedQuad.wgsl',\n contents: fullscreenTexturedQuadWGSL,\n editable: true,\n },\n ],\n filename: __filename,\n });\n\nexport default ImageBlur;\n"},{name:"./blur.wgsl",contents:i,editable:!0},{name:"../../shaders/fullscreenTexturedQuad.wgsl",contents:a.Z,editable:!0}],filename:o});var c=s},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},134:function(e,n){"use strict";n.Z="@group(0) @binding(0) var mySampler : sampler;\n@group(0) @binding(1) var myTexture : texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4<f32>,\n @location(0) fragUV : vec2<f32>,\n}\n\n@vertex\nfn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {\n const pos = array(\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2(-1.0, 1.0),\n );\n\n const uv = array(\n vec2(1.0, 0.0),\n vec2(1.0, 1.0),\n vec2(0.0, 1.0),\n vec2(1.0, 0.0),\n vec2(0.0, 1.0),\n vec2(0.0, 0.0),\n );\n\n var output : VertexOutput;\n output.Position = vec4(pos[VertexIndex], 0.0, 1.0);\n output.fragUV = uv[VertexIndex];\n return output;\n}\n\n@fragment\nfn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {\n return textureSample(myTexture, mySampler, fragUV);\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/878.ac7b18c5949410b1.js b/_next/static/chunks/878.616dc3f7dab79bc0.js similarity index 69% rename from _next/static/chunks/878.ac7b18c5949410b1.js rename to _next/static/chunks/878.616dc3f7dab79bc0.js index 0db37fb5..e350d982 100644 --- a/_next/static/chunks/878.ac7b18c5949410b1.js +++ b/_next/static/chunks/878.616dc3f7dab79bc0.js @@ -1 +1 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[878],{5671:function(e,t,n){"use strict";n.d(t,{Tl:function(){return m},hu:function(){return p}});var a=n(5893),i=n(9008),r=n.n(i),o=n(1163),s=n(7294),c=n(9147),l=n.n(c);n(7319);let u=e=>{let t=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:t,contents:i}=e;return{name:t,...function(e){let t;let i=null;{i=document.createElement("div");let 
r=n(4631);t=r(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(n){return(0,a.jsx)("div",{...n,children:(0,a.jsx)("div",{ref(n){i&&n&&(n.appendChild(i),t.setOption("value",e))}})})}}}(i)}}),e.sources),c=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let t=n(4376);return new t.GUI({autoPlace:!1})}},[]),m=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let t=n(2792);return new t}},[]),h=(0,o.useRouter)(),d=h.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,g]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(d?x(d[1]):x(i[0].name),u&&c.current)for(c.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();p&&m.current&&(p.dom.style.position="absolute",p.showPanel(1),m.current.appendChild(p.dom));let n={active:!0},a=()=>{n.active=!1};try{let r=t.current;if(!r)throw Error("The canvas is not available");let o=e.init({canvas:r,pageState:n,gui:u,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return a},[]),(0,a.jsxs)("main",{children:[(0,a.jsxs)(r(),{children:[(0,a.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,a.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,a.jsx)("meta",{name:"description",content:e.description}),(0,a.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("h1",{children:e.name}),(0,a.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,a.jsx)("p",{children:e.description}),f?(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,a.jsx)("p",{children:"".concat(f)})]}):null]}),(0,a.jsxs)("div",{className:l().canvasContainer,children:[(0,a.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,a.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,a.jsx)("canvas",{ref:t})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("nav",{className:l().sourceFileNav,children:(0,a.jsx)("ul",{children:i.map((e,t)=>(0,a.jsx)("li",{children:(0,a.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},t))})}),i.map((e,t)=>(0,a.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":v==e.name},t))]})]})},m=e=>(0,a.jsx)(u,{...e});function p(e,t){if(!e)throw Error(t)}},4655:function(e,t,n){"use strict";n.d(t,{Ax:function(){return r},MO:function(){return o},O$:function(){return a},v8:function(){return i},zS:function(){return s}});let a=40,i=0,r=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},3878:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return f}});var a=n(6416),i=n(5671),r=n(4655),o="struct Uniforms {\n modelViewProjectionMatrix : mat4x4<f32>,\n}\n\n@group(0) @binding(0) var<uniform> uniforms : Uniforms;\n@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) fragUV : vec2f,\n}\n\n@vertex\nfn vertex_main(\n @location(0) position : vec4f,\n @location(1) uv : vec2f\n) -> VertexOutput {\n return VertexOutput(uniforms.modelViewProjectionMatrix * position, uv);\n}\n\n@fragment\nfn fragment_main(@location(0) fragUV: vec2f) -> @location(0) vec4f {\n return textureSample(myTexture, mySampler, fragUV);\n}\n";let s={name:"src/sample/cameras/camera.ts".substring(19),contents:"// Note: The code in this file does not use the 'dst' output parameter of functions in the\n// 'wgpu-matrix' library, so it produces many temporary vectors and matrices.\n// This is intentional, as this sample prefers readability over performance.\nimport { Mat4, Vec3, Vec4, mat4, vec3 } from 'wgpu-matrix';\nimport Input from './input';\n\n// Information about this file, used by the sample UI\nexport const cameraSourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n};\n\n// Common interface for camera implementations\nexport default interface Camera {\n // update updates the camera using the user input and returns the view matrix.\n update(delta_time: number, input: Input): Mat4;\n\n // The camera matrix.\n // This is the inverse of the view matrix.\n matrix: Mat4;\n // Alias to column vector 0 of the camera matrix.\n right: Vec4;\n // Alias to column vector 1 of the camera matrix.\n up: Vec4;\n // Alias to column vector 2 of the camera matrix.\n back: Vec4;\n // Alias to 
column vector 3 of the camera matrix.\n position: Vec4;\n}\n\n// The common functionality between camera implementations\nclass CameraBase {\n // The camera matrix\n private matrix_ = new Float32Array([\n 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n ]);\n\n // The calculated view matrix\n private readonly view_ = mat4.create();\n\n // Aliases to column vectors of the matrix\n private right_ = new Float32Array(this.matrix_.buffer, 4 * 0, 4);\n private up_ = new Float32Array(this.matrix_.buffer, 4 * 4, 4);\n private back_ = new Float32Array(this.matrix_.buffer, 4 * 8, 4);\n private position_ = new Float32Array(this.matrix_.buffer, 4 * 12, 4);\n\n // Returns the camera matrix\n get matrix() {\n return this.matrix_;\n }\n // Assigns `mat` to the camera matrix\n set matrix(mat: Mat4) {\n mat4.copy(mat, this.matrix_);\n }\n\n // Returns the camera view matrix\n get view() {\n return this.view_;\n }\n // Assigns `mat` to the camera view\n set view(mat: Mat4) {\n mat4.copy(mat, this.view_);\n }\n\n // Returns column vector 0 of the camera matrix\n get right() {\n return this.right_;\n }\n // Assigns `vec` to the first 3 elements of column vector 0 of the camera matrix\n set right(vec: Vec3) {\n vec3.copy(vec, this.right_);\n }\n\n // Returns column vector 1 of the camera matrix\n get up() {\n return this.up_;\n }\n // Assigns `vec` to the first 3 elements of column vector 1 of the camera matrix\n set up(vec: Vec3) {\n vec3.copy(vec, this.up_);\n }\n\n // Returns column vector 2 of the camera matrix\n get back() {\n return this.back_;\n }\n // Assigns `vec` to the first 3 elements of column vector 2 of the camera matrix\n set back(vec: Vec3) {\n vec3.copy(vec, this.back_);\n }\n\n // Returns column vector 3 of the camera matrix\n get position() {\n return this.position_;\n }\n // Assigns `vec` to the first 3 elements of column vector 3 of the camera matrix\n set position(vec: Vec3) {\n vec3.copy(vec, this.position_);\n }\n}\n\n// WASDCamera is a camera implementation that behaves similarly to first-person-shooter PC games.\nexport class WASDCamera extends CameraBase implements Camera {\n // The camera absolute pitch angle\n private pitch = 0;\n // The camera absolute yaw angle\n private yaw = 0;\n\n // The movement velocity\n private readonly velocity_ = vec3.create();\n\n // Speed multiplier for camera movement\n movementSpeed = 10;\n\n // Speed multiplier for camera rotation\n rotationSpeed = 1;\n\n // Movement velocity drag coefficient [0 .. 1]\n // 0: Continues forever\n // 1: Instantly stops moving\n frictionCoefficient = 0.99;\n\n // Returns velocity vector\n get velocity() {\n return this.velocity_;\n }\n // Assigns `vec` to the velocity vector\n set velocity(vec: Vec3) {\n vec3.copy(vec, this.velocity_);\n }\n\n // Constructor\n constructor(options?: {\n // The initial position of the camera\n position?: Vec3;\n // The initial target of the camera\n target?: Vec3;\n }) {\n super();\n if (options && (options.position || options.target)) {\n const position = options.position ?? vec3.create(0, 0, -5);\n const target = options.target ?? 
vec3.create(0, 0, 0);\n const forward = vec3.normalize(vec3.sub(target, position));\n this.recalculateAngles(forward);\n this.position = position;\n }\n }\n\n // Returns the camera matrix\n get matrix() {\n return super.matrix;\n }\n\n // Assigns `mat` to the camera matrix, and recalculates the camera angles\n set matrix(mat: Mat4) {\n super.matrix = mat;\n this.recalculateAngles(this.back);\n }\n\n update(deltaTime: number, input: Input): Mat4 {\n const sign = (positive: boolean, negative: boolean) =>\n (positive ? 1 : 0) - (negative ? 1 : 0);\n\n // Apply the delta rotation to the pitch and yaw angles\n this.yaw -= input.analog.x * deltaTime * this.rotationSpeed;\n this.pitch -= input.analog.y * deltaTime * this.rotationSpeed;\n\n // Wrap yaw between [0\xb0 .. 360\xb0], just to prevent large accumulation.\n this.yaw = mod(this.yaw, Math.PI * 2);\n // Clamp pitch between [-90\xb0 .. +90\xb0] to prevent somersaults.\n this.pitch = clamp(this.pitch, -Math.PI / 2, Math.PI / 2);\n\n // Save the current position, as we're about to rebuild the camera matrix.\n const position = vec3.copy(this.position);\n\n // Reconstruct the camera's rotation, and store into the camera matrix.\n super.matrix = mat4.rotateX(mat4.rotationY(this.yaw), this.pitch);\n\n // Calculate the new target velocity\n const digital = input.digital;\n const deltaRight = sign(digital.right, digital.left);\n const deltaUp = sign(digital.up, digital.down);\n const targetVelocity = vec3.create();\n const deltaBack = sign(digital.backward, digital.forward);\n vec3.addScaled(targetVelocity, this.right, deltaRight, targetVelocity);\n vec3.addScaled(targetVelocity, this.up, deltaUp, targetVelocity);\n vec3.addScaled(targetVelocity, this.back, deltaBack, targetVelocity);\n vec3.normalize(targetVelocity, targetVelocity);\n vec3.mulScalar(targetVelocity, this.movementSpeed, targetVelocity);\n\n // Mix new target velocity\n this.velocity = lerp(\n targetVelocity,\n this.velocity,\n Math.pow(1 - this.frictionCoefficient, deltaTime)\n );\n\n // Integrate velocity to calculate new position\n this.position = vec3.addScaled(position, this.velocity, deltaTime);\n\n // Invert the camera matrix to build the view matrix\n this.view = mat4.invert(this.matrix);\n return this.view;\n }\n\n // Recalculates the yaw and pitch values from a directional vector\n recalculateAngles(dir: Vec3) {\n this.yaw = Math.atan2(dir[0], dir[2]);\n this.pitch = -Math.asin(dir[1]);\n }\n}\n\n// ArcballCamera implements a basic orbiting camera around the world origin\nexport class ArcballCamera extends CameraBase implements Camera {\n // The camera distance from the target\n private distance = 0;\n\n // The current angular velocity\n private angularVelocity = 0;\n\n // The current rotation axis\n private axis_ = vec3.create();\n\n // Returns the rotation axis\n get axis() {\n return this.axis_;\n }\n // Assigns `vec` to the rotation axis\n set axis(vec: Vec3) {\n vec3.copy(vec, this.axis_);\n }\n\n // Speed multiplier for camera rotation\n rotationSpeed = 1;\n\n // Speed multiplier for camera zoom\n zoomSpeed = 0.1;\n\n // Rotation velocity drag coefficient [0 .. 
1]\n // 0: Spins forever\n // 1: Instantly stops spinning\n frictionCoefficient = 0.999;\n\n // Constructor\n constructor(options?: {\n // The initial position of the camera\n position?: Vec3;\n }) {\n super();\n if (options && options.position) {\n this.position = options.position;\n this.distance = vec3.len(this.position);\n this.back = vec3.normalize(this.position);\n this.recalcuateRight();\n this.recalcuateUp();\n }\n }\n\n // Returns the camera matrix\n get matrix() {\n return super.matrix;\n }\n\n // Assigns `mat` to the camera matrix, and recalculates the distance\n set matrix(mat: Mat4) {\n super.matrix = mat;\n this.distance = vec3.len(this.position);\n }\n\n update(deltaTime: number, input: Input): Mat4 {\n const epsilon = 0.0000001;\n\n if (input.analog.touching) {\n // Currently being dragged.\n this.angularVelocity = 0;\n } else {\n // Dampen any existing angular velocity\n this.angularVelocity *= Math.pow(1 - this.frictionCoefficient, deltaTime);\n }\n\n // Calculate the movement vector\n const movement = vec3.create();\n vec3.addScaled(movement, this.right, input.analog.x, movement);\n vec3.addScaled(movement, this.up, -input.analog.y, movement);\n\n // Cross the movement vector with the view direction to calculate the rotation axis x magnitude\n const crossProduct = vec3.cross(movement, this.back);\n\n // Calculate the magnitude of the drag\n const magnitude = vec3.len(crossProduct);\n\n if (magnitude > epsilon) {\n // Normalize the crossProduct to get the rotation axis\n this.axis = vec3.scale(crossProduct, 1 / magnitude);\n\n // Remember the current angular velocity. This is used when the touch is released for a fling.\n this.angularVelocity = magnitude * this.rotationSpeed;\n }\n\n // The rotation around this.axis to apply to the camera matrix this update\n const rotationAngle = this.angularVelocity * deltaTime;\n if (rotationAngle > epsilon) {\n // Rotate the matrix around axis\n // Note: The rotation is not done as a matrix-matrix multiply as the repeated multiplications\n // will quickly introduce substantial error into the matrix.\n this.back = vec3.normalize(rotate(this.back, this.axis, rotationAngle));\n this.recalcuateRight();\n this.recalcuateUp();\n }\n\n // recalculate `this.position` from `this.back` considering zoom\n if (input.analog.zoom !== 0) {\n this.distance *= 1 + input.analog.zoom * this.zoomSpeed;\n }\n this.position = vec3.scale(this.back, this.distance);\n\n // Invert the camera matrix to build the view matrix\n this.view = mat4.invert(this.matrix);\n return this.view;\n }\n\n // Assigns `this.right` with the cross product of `this.up` and `this.back`\n recalcuateRight() {\n this.right = vec3.normalize(vec3.cross(this.up, this.back));\n }\n\n // Assigns `this.up` with the cross product of `this.back` and `this.right`\n recalcuateUp() {\n this.up = vec3.normalize(vec3.cross(this.back, this.right));\n }\n}\n\n// Returns `x` clamped between [`min` .. 
`max`]\nfunction clamp(x: number, min: number, max: number): number {\n return Math.min(Math.max(x, min), max);\n}\n\n// Returns `x` float-modulo `div`\nfunction mod(x: number, div: number): number {\n return x - Math.floor(Math.abs(x) / div) * div * Math.sign(x);\n}\n\n// Returns `vec` rotated `angle` radians around `axis`\nfunction rotate(vec: Vec3, axis: Vec3, angle: number): Vec3 {\n return vec3.transformMat4Upper3x3(vec, mat4.rotation(axis, angle));\n}\n\n// Returns the linear interpolation between 'a' and 'b' using 's'\nfunction lerp(a: Vec3, b: Vec3, s: number): Vec3 {\n return vec3.addScaled(a, vec3.sub(b, a), s);\n}\n"};class c{get matrix(){return this.matrix_}set matrix(e){a._E.copy(e,this.matrix_)}get view(){return this.view_}set view(e){a._E.copy(e,this.view_)}get right(){return this.right_}set right(e){a.R3.copy(e,this.right_)}get up(){return this.up_}set up(e){a.R3.copy(e,this.up_)}get back(){return this.back_}set back(e){a.R3.copy(e,this.back_)}get position(){return this.position_}set position(e){a.R3.copy(e,this.position_)}constructor(){this.matrix_=new Float32Array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]),this.view_=a._E.create(),this.right_=new Float32Array(this.matrix_.buffer,0,4),this.up_=new Float32Array(this.matrix_.buffer,16,4),this.back_=new Float32Array(this.matrix_.buffer,32,4),this.position_=new Float32Array(this.matrix_.buffer,48,4)}}class l extends c{get velocity(){return this.velocity_}set velocity(e){a.R3.copy(e,this.velocity_)}get matrix(){return super.matrix}set matrix(e){super.matrix=e,this.recalculateAngles(this.back)}update(e,t){var n,i,r,o;let s=(e,t)=>(e?1:0)-(t?1:0);this.yaw-=t.analog.x*e*this.rotationSpeed,this.pitch-=t.analog.y*e*this.rotationSpeed,this.yaw=(n=this.yaw)-Math.floor(Math.abs(n)/(i=2*Math.PI))*i*Math.sign(n),this.pitch=Math.min(Math.max(this.pitch,-Math.PI/2),Math.PI/2);let c=a.R3.copy(this.position);super.matrix=a._E.rotateX(a._E.rotationY(this.yaw),this.pitch);let l=t.digital,u=s(l.right,l.left),m=s(l.up,l.down),p=a.R3.create(),h=s(l.backward,l.forward);return a.R3.addScaled(p,this.right,u,p),a.R3.addScaled(p,this.up,m,p),a.R3.addScaled(p,this.back,h,p),a.R3.normalize(p,p),a.R3.mulScalar(p,this.movementSpeed,p),this.velocity=(r=this.velocity,o=Math.pow(1-this.frictionCoefficient,e),a.R3.addScaled(p,a.R3.sub(r,p),o)),this.position=a.R3.addScaled(c,this.velocity,e),this.view=a._E.invert(this.matrix),this.view}recalculateAngles(e){this.yaw=Math.atan2(e[0],e[2]),this.pitch=-Math.asin(e[1])}constructor(e){if(super(),this.pitch=0,this.yaw=0,this.velocity_=a.R3.create(),this.movementSpeed=10,this.rotationSpeed=1,this.frictionCoefficient=.99,e&&(e.position||e.target)){var t,n;let i=null!==(t=e.position)&&void 0!==t?t:a.R3.create(0,0,-5),r=null!==(n=e.target)&&void 0!==n?n:a.R3.create(0,0,0),o=a.R3.normalize(a.R3.sub(r,i));this.recalculateAngles(o),this.position=i}}}class u extends c{get axis(){return this.axis_}set axis(e){a.R3.copy(e,this.axis_)}get matrix(){return super.matrix}set matrix(e){super.matrix=e,this.distance=a.R3.len(this.position)}update(e,t){var n,i;t.analog.touching?this.angularVelocity=0:this.angularVelocity*=Math.pow(1-this.frictionCoefficient,e);let r=a.R3.create();a.R3.addScaled(r,this.right,t.analog.x,r),a.R3.addScaled(r,this.up,-t.analog.y,r);let o=a.R3.cross(r,this.back),s=a.R3.len(o);s>1e-7&&(this.axis=a.R3.scale(o,1/s),this.angularVelocity=s*this.rotationSpeed);let c=this.angularVelocity*e;return 
c>1e-7&&(this.back=a.R3.normalize((n=this.back,i=this.axis,a.R3.transformMat4Upper3x3(n,a._E.rotation(i,c)))),this.recalcuateRight(),this.recalcuateUp()),0!==t.analog.zoom&&(this.distance*=1+t.analog.zoom*this.zoomSpeed),this.position=a.R3.scale(this.back,this.distance),this.view=a._E.invert(this.matrix),this.view}recalcuateRight(){this.right=a.R3.normalize(a.R3.cross(this.up,this.back))}recalcuateUp(){this.up=a.R3.normalize(a.R3.cross(this.back,this.right))}constructor(e){super(),this.distance=0,this.angularVelocity=0,this.axis_=a.R3.create(),this.rotationSpeed=1,this.zoomSpeed=.1,this.frictionCoefficient=.999,e&&e.position&&(this.position=e.position,this.distance=a.R3.len(this.position),this.back=a.R3.normalize(this.position),this.recalcuateRight(),this.recalcuateUp())}}let m={name:"src/sample/cameras/input.ts".substring(19),contents:"// Information about this file, used by the sample UI\nexport const inputSourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n};\n\n// Input holds a snapshot of input state\nexport default interface Input {\n // Digital input (e.g. keyboard state)\n readonly digital: {\n readonly forward: boolean;\n readonly backward: boolean;\n readonly left: boolean;\n readonly right: boolean;\n readonly up: boolean;\n readonly down: boolean;\n };\n // Analog input (e.g. mouse, touchscreen)\n readonly analog: {\n readonly x: number;\n readonly y: number;\n readonly zoom: number;\n readonly touching: boolean;\n };\n}\n\n// InputHandler is a function that, when called, returns the current Input state.\nexport type InputHandler = () => Input;\n\n// createInputHandler returns an InputHandler by attaching event handlers to the window.\nexport function createInputHandler(window: Window): InputHandler {\n const digital = {\n forward: false,\n backward: false,\n left: false,\n right: false,\n up: false,\n down: false,\n };\n const analog = {\n x: 0,\n y: 0,\n zoom: 0,\n };\n let mouseDown = false;\n\n const setDigital = (e: KeyboardEvent, value: boolean) => {\n switch (e.code) {\n case 'KeyW':\n digital.forward = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyS':\n digital.backward = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyA':\n digital.left = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyD':\n digital.right = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'Space':\n digital.up = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'ShiftLeft':\n case 'ControlLeft':\n case 'KeyC':\n digital.down = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n }\n };\n\n window.addEventListener('keydown', (e) => setDigital(e, true));\n window.addEventListener('keyup', (e) => setDigital(e, false));\n window.addEventListener('mousedown', () => {\n mouseDown = true;\n });\n window.addEventListener('mouseup', () => {\n mouseDown = false;\n });\n window.addEventListener('mousemove', (e) => {\n mouseDown = (e.buttons & 1) !== 0;\n if (mouseDown) {\n analog.x += e.movementX;\n analog.y += e.movementY;\n }\n });\n window.addEventListener(\n 'wheel',\n (e) => {\n mouseDown = (e.buttons & 1) !== 0;\n if (mouseDown) {\n // The scroll value varies substantially between user agents / browsers.\n // Just use the sign.\n analog.zoom += Math.sign(e.deltaY);\n e.preventDefault();\n e.stopPropagation();\n }\n },\n { passive: false }\n );\n\n return () => {\n const out = {\n digital,\n analog: {\n x: analog.x,\n y: analog.y,\n zoom: 
analog.zoom,\n touching: mouseDown,\n },\n };\n // Clear the analog values, as these accumulate.\n analog.x = 0;\n analog.y = 0;\n analog.zoom = 0;\n return out;\n };\n}\n"};var p="src/sample/cameras/main.ts";let h=async e=>{let t,{canvas:n,pageState:i,gui:s}=e;if(!i.active)return;let c=function(e){let t={forward:!1,backward:!1,left:!1,right:!1,up:!1,down:!1},n={x:0,y:0,zoom:0},a=!1,i=(e,n)=>{switch(e.code){case"KeyW":t.forward=n,e.preventDefault(),e.stopPropagation();break;case"KeyS":t.backward=n,e.preventDefault(),e.stopPropagation();break;case"KeyA":t.left=n,e.preventDefault(),e.stopPropagation();break;case"KeyD":t.right=n,e.preventDefault(),e.stopPropagation();break;case"Space":t.up=n,e.preventDefault(),e.stopPropagation();break;case"ShiftLeft":case"ControlLeft":case"KeyC":t.down=n,e.preventDefault(),e.stopPropagation()}};return e.addEventListener("keydown",e=>i(e,!0)),e.addEventListener("keyup",e=>i(e,!1)),e.addEventListener("mousedown",()=>{a=!0}),e.addEventListener("mouseup",()=>{a=!1}),e.addEventListener("mousemove",e=>{(a=(1&e.buttons)!=0)&&(n.x+=e.movementX,n.y+=e.movementY)}),e.addEventListener("wheel",e=>{(a=(1&e.buttons)!=0)&&(n.zoom+=Math.sign(e.deltaY),e.preventDefault(),e.stopPropagation())},{passive:!1}),()=>{let e={digital:t,analog:{x:n.x,y:n.y,zoom:n.zoom,touching:a}};return n.x=0,n.y=0,n.zoom=0,e}}(window),m=a.R3.create(3,2,5),p={arcball:new u({position:m}),WASD:new l({position:m})},h={type:"arcball"},d=h.type;s.add(h,"type",["arcball","WASD"]).onChange(()=>{let e=h.type;p[e].matrix=p[d].matrix,d=e});let f=await navigator.gpu.requestAdapter(),g=await f.requestDevice(),v=n.getContext("webgpu"),x=window.devicePixelRatio;n.width=n.clientWidth*x,n.height=n.clientHeight*x;let b=navigator.gpu.getPreferredCanvasFormat();v.configure({device:g,format:b,alphaMode:"premultiplied"});let w=g.createBuffer({size:r.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(w.getMappedRange()).set(r.zS),w.unmap();let y=g.createRenderPipeline({layout:"auto",vertex:{module:g.createShaderModule({code:o}),entryPoint:"vertex_main",buffers:[{arrayStride:r.O$,attributes:[{shaderLocation:0,offset:r.v8,format:"float32x4"},{shaderLocation:1,offset:r.Ax,format:"float32x2"}]}]},fragment:{module:g.createShaderModule({code:o}),entryPoint:"fragment_main",targets:[{format:b}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),_=g.createTexture({size:[n.width,n.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),S=g.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let C=await fetch("/assets/img/Di-3d.png"),R=await createImageBitmap(await C.blob());t=g.createTexture({size:[R.width,R.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),g.queue.copyExternalImageToTexture({source:R},{texture:t},[R.width,R.height])}let T=g.createSampler({magFilter:"linear",minFilter:"linear"}),P=g.createBindGroup({layout:y.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:S}},{binding:1,resource:T},{binding:2,resource:t.createView()}]}),A={colorAttachments:[{view:void 0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:_.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},V=n.width/n.height,M=a._E.perspective(2*Math.PI/5,V,1,100),E=a._E.create(),k=Date.now();requestAnimationFrame(function e(){let 
t=Date.now(),n=(t-k)/1e3;if(k=t,!i.active)return;let o=function(e){let t=p[h.type],n=t.update(e,c());return a._E.multiply(M,n,E),E}(n);g.queue.writeBuffer(S,0,o.buffer,o.byteOffset,o.byteLength),A.colorAttachments[0].view=v.getCurrentTexture().createView();let s=g.createCommandEncoder(),l=s.beginRenderPass(A);l.setPipeline(y),l.setBindGroup(0,P),l.setVertexBuffer(0,w),l.draw(r.MO),l.end(),g.queue.submit([s.finish()]),requestAnimationFrame(e)})},d=()=>(0,i.Tl)({name:"Cameras",description:"This example provides example camera implementations",gui:!0,init:h,sources:[{name:p.substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\nimport cubeWGSL from './cube.wgsl';\nimport { ArcballCamera, WASDCamera, cameraSourceInfo } from './camera';\nimport { createInputHandler, inputSourceInfo } from './input';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n if (!pageState.active) {\n return;\n }\n\n // The input handler\n const inputHandler = createInputHandler(window);\n\n // The camera types\n const initialCameraPosition = vec3.create(3, 2, 5);\n const cameras = {\n arcball: new ArcballCamera({ position: initialCameraPosition }),\n WASD: new WASDCamera({ position: initialCameraPosition }),\n };\n\n // GUI parameters\n const params: { type: 'arcball' | 'WASD' } = {\n type: 'arcball',\n };\n\n // Callback handler for camera mode\n let oldCameraType = params.type;\n gui.add(params, 'type', ['arcball', 'WASD']).onChange(() => {\n // Copy the camera matrix from old to new\n const newCameraType = params.type;\n cameras[newCameraType].matrix = cameras[oldCameraType].matrix;\n oldCameraType = newCameraType;\n });\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: cubeWGSL,\n }),\n entryPoint: 'vertex_main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: cubeWGSL,\n }),\n entryPoint: 'fragment_main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n cullMode: 'back',\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 
'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let cubeTexture: GPUTexture;\n {\n const response = await fetch('/assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n cubeTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubeTexture.createView(),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getModelViewProjectionMatrix(deltaTime: number) {\n const camera = cameras[params.type];\n const viewMatrix = camera.update(deltaTime, inputHandler());\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n return modelViewProjectionMatrix as Float32Array;\n }\n\n let lastFrameMS = Date.now();\n\n function frame() {\n const now = Date.now();\n const deltaTime = (now - lastFrameMS) / 1000;\n lastFrameMS = now;\n\n if (!pageState.active) {\n // Sample is no longer the active page.\n return;\n }\n\n const modelViewProjection = getModelViewProjectionMatrix(deltaTime);\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n modelViewProjection.buffer,\n modelViewProjection.byteOffset,\n modelViewProjection.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst TexturedCube: () => JSX.Element = () =>\n makeSample({\n name: 'Cameras',\n description: 'This example provides example camera implementations',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n cameraSourceInfo,\n inputSourceInfo,\n {\n name: '../../shaders/cube.wgsl',\n contents: 
cubeWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default TexturedCube;\n"},s,m,{name:"../../shaders/cube.wgsl",contents:o,editable:!0},{name:"../../meshes/cube.ts",contents:n(2448).Z}],filename:p});var f=d},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,t){"use strict";t.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"}}]); \ No newline at end of file +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[878],{5671:function(e,t,n){"use strict";n.d(t,{Tl:function(){return m},hu:function(){return p}});var a=n(5893),i=n(9008),r=n.n(i),o=n(1163),s=n(7294),c=n(9147),l=n.n(c);n(7319);let u=e=>{let t=(0,s.useRef)(null),i=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:t,contents:i}=e;return{name:t,...function(e){let t;let i=null;{i=document.createElement("div");let r=n(4631);t=r(i,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(n){return(0,a.jsx)("div",{...n,children:(0,a.jsx)("div",{ref(n){i&&n&&(n.appendChild(i),t.setOption("value",e))}})})}}}(i)}}),e.sources),c=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let t=n(4376);return new t.GUI({autoPlace:!1})}},[]),m=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let t=n(2792);return new t}},[]),h=(0,o.useRouter)(),d=h.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[f,g]=(0,s.useState)(null),[v,x]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(d?x(d[1]):x(i[0].name),u&&c.current)for(c.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();p&&m.current&&(p.dom.style.position="absolute",p.showPanel(1),m.current.appendChild(p.dom));let n={active:!0},a=()=>{n.active=!1};try{let 
r=t.current;if(!r)throw Error("The canvas is not available");let o=e.init({canvas:r,pageState:n,gui:u,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),g(e)})}catch(s){console.error(s),g(s)}return a},[]),(0,a.jsxs)("main",{children:[(0,a.jsxs)(r(),{children:[(0,a.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,a.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,a.jsx)("meta",{name:"description",content:e.description}),(0,a.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("h1",{children:e.name}),(0,a.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,a.jsx)("p",{children:e.description}),f?(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,a.jsx)("p",{children:"".concat(f)})]}):null]}),(0,a.jsxs)("div",{className:l().canvasContainer,children:[(0,a.jsx)("div",{style:{position:"absolute",left:10},ref:m}),(0,a.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,a.jsx)("canvas",{ref:t})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("nav",{className:l().sourceFileNav,children:(0,a.jsx)("ul",{children:i.map((e,t)=>(0,a.jsx)("li",{children:(0,a.jsx)("a",{href:"#".concat(e.name),"data-active":v==e.name,onClick(){x(e.name)},children:e.name})},t))})}),i.map((e,t)=>(0,a.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":v==e.name},t))]})]})},m=e=>(0,a.jsx)(u,{...e});function p(e,t){if(!e)throw Error(t)}},4655:function(e,t,n){"use strict";n.d(t,{Ax:function(){return r},MO:function(){return o},O$:function(){return a},v8:function(){return i},zS:function(){return s}});let a=40,i=0,r=32,o=36,s=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0])},3878:function(e,t,n){"use strict";n.r(t),n.d(t,{default:function(){return f}});var a=n(6416),i=n(5671),r=n(4655),o="struct Uniforms {\n modelViewProjectionMatrix : mat4x4<f32>,\n}\n\n@group(0) @binding(0) var<uniform> uniforms : Uniforms;\n@group(0) @binding(1) var mySampler: sampler;\n@group(0) @binding(2) var myTexture: texture_2d<f32>;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) fragUV : vec2f,\n}\n\n@vertex\nfn vertex_main(\n @location(0) position : vec4f,\n @location(1) uv : vec2f\n) -> VertexOutput {\n return VertexOutput(uniforms.modelViewProjectionMatrix * position, uv);\n}\n\n@fragment\nfn fragment_main(@location(0) fragUV: vec2f) -> @location(0) vec4f {\n return textureSample(myTexture, mySampler, fragUV);\n}\n";let 
s={name:"src/sample/cameras/camera.ts".substring(19),contents:"// Note: The code in this file does not use the 'dst' output parameter of functions in the\n// 'wgpu-matrix' library, so produces many temporary vectors and matrices.\n// This is intentional, as this sample prefers readability over performance.\nimport { Mat4, Vec3, Vec4, mat4, vec3 } from 'wgpu-matrix';\nimport Input from './input';\n\n// Information about this file, used by the sample UI\nexport const cameraSourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n};\n\n// Common interface for camera implementations\nexport default interface Camera {\n // update updates the camera using the user-input and returns the view matrix.\n update(delta_time: number, input: Input): Mat4;\n\n // The camera matrix.\n // This is the inverse of the view matrix.\n matrix: Mat4;\n // Alias to column vector 0 of the camera matrix.\n right: Vec4;\n // Alias to column vector 1 of the camera matrix.\n up: Vec4;\n // Alias to column vector 2 of the camera matrix.\n back: Vec4;\n // Alias to column vector 3 of the camera matrix.\n position: Vec4;\n}\n\n// The common functionality between camera implementations\nclass CameraBase {\n // The camera matrix\n private matrix_ = new Float32Array([\n 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n ]);\n\n // The calculated view matrix\n private readonly view_ = mat4.create();\n\n // Aliases to column vectors of the matrix\n private right_ = new Float32Array(this.matrix_.buffer, 4 * 0, 4);\n private up_ = new Float32Array(this.matrix_.buffer, 4 * 4, 4);\n private back_ = new Float32Array(this.matrix_.buffer, 4 * 8, 4);\n private position_ = new Float32Array(this.matrix_.buffer, 4 * 12, 4);\n\n // Returns the camera matrix\n get matrix() {\n return this.matrix_;\n }\n // Assigns `mat` to the camera matrix\n set matrix(mat: Mat4) {\n mat4.copy(mat, this.matrix_);\n }\n\n // Returns the camera view matrix\n get view() {\n return this.view_;\n }\n // Assigns `mat` to the camera view\n set view(mat: Mat4) {\n mat4.copy(mat, this.view_);\n }\n\n // Returns column vector 0 of the camera matrix\n get right() {\n return this.right_;\n }\n // Assigns `vec` to the first 3 elements of column vector 0 of the camera matrix\n set right(vec: Vec3) {\n vec3.copy(vec, this.right_);\n }\n\n // Returns column vector 1 of the camera matrix\n get up() {\n return this.up_;\n }\n // Assigns `vec` to the first 3 elements of column vector 1 of the camera matrix\n set up(vec: Vec3) {\n vec3.copy(vec, this.up_);\n }\n\n // Returns column vector 2 of the camera matrix\n get back() {\n return this.back_;\n }\n // Assigns `vec` to the first 3 elements of column vector 2 of the camera matrix\n set back(vec: Vec3) {\n vec3.copy(vec, this.back_);\n }\n\n // Returns column vector 3 of the camera matrix\n get position() {\n return this.position_;\n }\n // Assigns `vec` to the first 3 elements of column vector 3 of the camera matrix\n set position(vec: Vec3) {\n vec3.copy(vec, this.position_);\n }\n}\n\n// WASDCamera is a camera implementation that behaves similar to first-person-shooter PC games.\nexport class WASDCamera extends CameraBase implements Camera {\n // The camera absolute pitch angle\n private pitch = 0;\n // The camera absolute yaw angle\n private yaw = 0;\n\n // The movement veloicty\n private readonly velocity_ = vec3.create();\n\n // Speed multiplier for camera movement\n movementSpeed = 10;\n\n // Speed multiplier for camera rotation\n rotationSpeed = 1;\n\n // Movement velocity drag 
coeffient [0 .. 1]\n // 0: Continues forever\n // 1: Instantly stops moving\n frictionCoefficient = 0.99;\n\n // Returns velocity vector\n get velocity() {\n return this.velocity_;\n }\n // Assigns `vec` to the velocity vector\n set velocity(vec: Vec3) {\n vec3.copy(vec, this.velocity_);\n }\n\n // Construtor\n constructor(options?: {\n // The initial position of the camera\n position?: Vec3;\n // The initial target of the camera\n target?: Vec3;\n }) {\n super();\n if (options && (options.position || options.target)) {\n const position = options.position ?? vec3.create(0, 0, -5);\n const target = options.target ?? vec3.create(0, 0, 0);\n const forward = vec3.normalize(vec3.sub(target, position));\n this.recalculateAngles(forward);\n this.position = position;\n }\n }\n\n // Returns the camera matrix\n get matrix() {\n return super.matrix;\n }\n\n // Assigns `mat` to the camera matrix, and recalcuates the camera angles\n set matrix(mat: Mat4) {\n super.matrix = mat;\n this.recalculateAngles(this.back);\n }\n\n update(deltaTime: number, input: Input): Mat4 {\n const sign = (positive: boolean, negative: boolean) =>\n (positive ? 1 : 0) - (negative ? 1 : 0);\n\n // Apply the delta rotation to the pitch and yaw angles\n this.yaw -= input.analog.x * deltaTime * this.rotationSpeed;\n this.pitch -= input.analog.y * deltaTime * this.rotationSpeed;\n\n // Wrap yaw between [0\xb0 .. 360\xb0], just to prevent large accumulation.\n this.yaw = mod(this.yaw, Math.PI * 2);\n // Clamp pitch between [-90\xb0 .. +90\xb0] to prevent somersaults.\n this.pitch = clamp(this.pitch, -Math.PI / 2, Math.PI / 2);\n\n // Save the current position, as we're about to rebuild the camera matrix.\n const position = vec3.copy(this.position);\n\n // Reconstruct the camera's rotation, and store into the camera matrix.\n super.matrix = mat4.rotateX(mat4.rotationY(this.yaw), this.pitch);\n\n // Calculate the new target velocity\n const digital = input.digital;\n const deltaRight = sign(digital.right, digital.left);\n const deltaUp = sign(digital.up, digital.down);\n const targetVelocity = vec3.create();\n const deltaBack = sign(digital.backward, digital.forward);\n vec3.addScaled(targetVelocity, this.right, deltaRight, targetVelocity);\n vec3.addScaled(targetVelocity, this.up, deltaUp, targetVelocity);\n vec3.addScaled(targetVelocity, this.back, deltaBack, targetVelocity);\n vec3.normalize(targetVelocity, targetVelocity);\n vec3.mulScalar(targetVelocity, this.movementSpeed, targetVelocity);\n\n // Mix new target velocity\n this.velocity = lerp(\n targetVelocity,\n this.velocity,\n Math.pow(1 - this.frictionCoefficient, deltaTime)\n );\n\n // Integrate velocity to calculate new position\n this.position = vec3.addScaled(position, this.velocity, deltaTime);\n\n // Invert the camera matrix to build the view matrix\n this.view = mat4.invert(this.matrix);\n return this.view;\n }\n\n // Recalculates the yaw and pitch values from a directional vector\n recalculateAngles(dir: Vec3) {\n this.yaw = Math.atan2(dir[0], dir[2]);\n this.pitch = -Math.asin(dir[1]);\n }\n}\n\n// ArcballCamera implements a basic orbiting camera around the world origin\nexport class ArcballCamera extends CameraBase implements Camera {\n // The camera distance from the target\n private distance = 0;\n\n // The current angular velocity\n private angularVelocity = 0;\n\n // The current rotation axis\n private axis_ = vec3.create();\n\n // Returns the rotation axis\n get axis() {\n return this.axis_;\n }\n // Assigns `vec` to the rotation axis\n set axis(vec: Vec3) 
{\n vec3.copy(vec, this.axis_);\n }\n\n // Speed multiplier for camera rotation\n rotationSpeed = 1;\n\n // Speed multiplier for camera zoom\n zoomSpeed = 0.1;\n\n // Rotation velocity drag coeffient [0 .. 1]\n // 0: Spins forever\n // 1: Instantly stops spinning\n frictionCoefficient = 0.999;\n\n // Construtor\n constructor(options?: {\n // The initial position of the camera\n position?: Vec3;\n }) {\n super();\n if (options && options.position) {\n this.position = options.position;\n this.distance = vec3.len(this.position);\n this.back = vec3.normalize(this.position);\n this.recalcuateRight();\n this.recalcuateUp();\n }\n }\n\n // Returns the camera matrix\n get matrix() {\n return super.matrix;\n }\n\n // Assigns `mat` to the camera matrix, and recalcuates the distance\n set matrix(mat: Mat4) {\n super.matrix = mat;\n this.distance = vec3.len(this.position);\n }\n\n update(deltaTime: number, input: Input): Mat4 {\n const epsilon = 0.0000001;\n\n if (input.analog.touching) {\n // Currently being dragged.\n this.angularVelocity = 0;\n } else {\n // Dampen any existing angular velocity\n this.angularVelocity *= Math.pow(1 - this.frictionCoefficient, deltaTime);\n }\n\n // Calculate the movement vector\n const movement = vec3.create();\n vec3.addScaled(movement, this.right, input.analog.x, movement);\n vec3.addScaled(movement, this.up, -input.analog.y, movement);\n\n // Cross the movement vector with the view direction to calculate the rotation axis x magnitude\n const crossProduct = vec3.cross(movement, this.back);\n\n // Calculate the magnitude of the drag\n const magnitude = vec3.len(crossProduct);\n\n if (magnitude > epsilon) {\n // Normalize the crossProduct to get the rotation axis\n this.axis = vec3.scale(crossProduct, 1 / magnitude);\n\n // Remember the current angular velocity. This is used when the touch is released for a fling.\n this.angularVelocity = magnitude * this.rotationSpeed;\n }\n\n // The rotation around this.axis to apply to the camera matrix this update\n const rotationAngle = this.angularVelocity * deltaTime;\n if (rotationAngle > epsilon) {\n // Rotate the matrix around axis\n // Note: The rotation is not done as a matrix-matrix multiply as the repeated multiplications\n // will quickly introduce substantial error into the matrix.\n this.back = vec3.normalize(rotate(this.back, this.axis, rotationAngle));\n this.recalcuateRight();\n this.recalcuateUp();\n }\n\n // recalculate `this.position` from `this.back` considering zoom\n if (input.analog.zoom !== 0) {\n this.distance *= 1 + input.analog.zoom * this.zoomSpeed;\n }\n this.position = vec3.scale(this.back, this.distance);\n\n // Invert the camera matrix to build the view matrix\n this.view = mat4.invert(this.matrix);\n return this.view;\n }\n\n // Assigns `this.right` with the cross product of `this.up` and `this.back`\n recalcuateRight() {\n this.right = vec3.normalize(vec3.cross(this.up, this.back));\n }\n\n // Assigns `this.up` with the cross product of `this.back` and `this.right`\n recalcuateUp() {\n this.up = vec3.normalize(vec3.cross(this.back, this.right));\n }\n}\n\n// Returns `x` clamped between [`min` .. 
`max`]\nfunction clamp(x: number, min: number, max: number): number {\n return Math.min(Math.max(x, min), max);\n}\n\n// Returns `x` float-modulo `div`\nfunction mod(x: number, div: number): number {\n return x - Math.floor(Math.abs(x) / div) * div * Math.sign(x);\n}\n\n// Returns `vec` rotated `angle` radians around `axis`\nfunction rotate(vec: Vec3, axis: Vec3, angle: number): Vec3 {\n return vec3.transformMat4Upper3x3(vec, mat4.rotation(axis, angle));\n}\n\n// Returns the linear interpolation between 'a' and 'b' using 's'\nfunction lerp(a: Vec3, b: Vec3, s: number): Vec3 {\n return vec3.addScaled(a, vec3.sub(b, a), s);\n}\n"};class c{get matrix(){return this.matrix_}set matrix(e){a._E.copy(e,this.matrix_)}get view(){return this.view_}set view(e){a._E.copy(e,this.view_)}get right(){return this.right_}set right(e){a.R3.copy(e,this.right_)}get up(){return this.up_}set up(e){a.R3.copy(e,this.up_)}get back(){return this.back_}set back(e){a.R3.copy(e,this.back_)}get position(){return this.position_}set position(e){a.R3.copy(e,this.position_)}constructor(){this.matrix_=new Float32Array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]),this.view_=a._E.create(),this.right_=new Float32Array(this.matrix_.buffer,0,4),this.up_=new Float32Array(this.matrix_.buffer,16,4),this.back_=new Float32Array(this.matrix_.buffer,32,4),this.position_=new Float32Array(this.matrix_.buffer,48,4)}}class l extends c{get velocity(){return this.velocity_}set velocity(e){a.R3.copy(e,this.velocity_)}get matrix(){return super.matrix}set matrix(e){super.matrix=e,this.recalculateAngles(this.back)}update(e,t){var n,i,r,o;let s=(e,t)=>(e?1:0)-(t?1:0);this.yaw-=t.analog.x*e*this.rotationSpeed,this.pitch-=t.analog.y*e*this.rotationSpeed,this.yaw=(n=this.yaw)-Math.floor(Math.abs(n)/(i=2*Math.PI))*i*Math.sign(n),this.pitch=Math.min(Math.max(this.pitch,-Math.PI/2),Math.PI/2);let c=a.R3.copy(this.position);super.matrix=a._E.rotateX(a._E.rotationY(this.yaw),this.pitch);let l=t.digital,u=s(l.right,l.left),m=s(l.up,l.down),p=a.R3.create(),h=s(l.backward,l.forward);return a.R3.addScaled(p,this.right,u,p),a.R3.addScaled(p,this.up,m,p),a.R3.addScaled(p,this.back,h,p),a.R3.normalize(p,p),a.R3.mulScalar(p,this.movementSpeed,p),this.velocity=(r=this.velocity,o=Math.pow(1-this.frictionCoefficient,e),a.R3.addScaled(p,a.R3.sub(r,p),o)),this.position=a.R3.addScaled(c,this.velocity,e),this.view=a._E.invert(this.matrix),this.view}recalculateAngles(e){this.yaw=Math.atan2(e[0],e[2]),this.pitch=-Math.asin(e[1])}constructor(e){if(super(),this.pitch=0,this.yaw=0,this.velocity_=a.R3.create(),this.movementSpeed=10,this.rotationSpeed=1,this.frictionCoefficient=.99,e&&(e.position||e.target)){var t,n;let i=null!==(t=e.position)&&void 0!==t?t:a.R3.create(0,0,-5),r=null!==(n=e.target)&&void 0!==n?n:a.R3.create(0,0,0),o=a.R3.normalize(a.R3.sub(r,i));this.recalculateAngles(o),this.position=i}}}class u extends c{get axis(){return this.axis_}set axis(e){a.R3.copy(e,this.axis_)}get matrix(){return super.matrix}set matrix(e){super.matrix=e,this.distance=a.R3.len(this.position)}update(e,t){var n,i;t.analog.touching?this.angularVelocity=0:this.angularVelocity*=Math.pow(1-this.frictionCoefficient,e);let r=a.R3.create();a.R3.addScaled(r,this.right,t.analog.x,r),a.R3.addScaled(r,this.up,-t.analog.y,r);let o=a.R3.cross(r,this.back),s=a.R3.len(o);s>1e-7&&(this.axis=a.R3.scale(o,1/s),this.angularVelocity=s*this.rotationSpeed);let c=this.angularVelocity*e;return 
c>1e-7&&(this.back=a.R3.normalize((n=this.back,i=this.axis,a.R3.transformMat4Upper3x3(n,a._E.rotation(i,c)))),this.recalcuateRight(),this.recalcuateUp()),0!==t.analog.zoom&&(this.distance*=1+t.analog.zoom*this.zoomSpeed),this.position=a.R3.scale(this.back,this.distance),this.view=a._E.invert(this.matrix),this.view}recalcuateRight(){this.right=a.R3.normalize(a.R3.cross(this.up,this.back))}recalcuateUp(){this.up=a.R3.normalize(a.R3.cross(this.back,this.right))}constructor(e){super(),this.distance=0,this.angularVelocity=0,this.axis_=a.R3.create(),this.rotationSpeed=1,this.zoomSpeed=.1,this.frictionCoefficient=.999,e&&e.position&&(this.position=e.position,this.distance=a.R3.len(this.position),this.back=a.R3.normalize(this.position),this.recalcuateRight(),this.recalcuateUp())}}let m={name:"src/sample/cameras/input.ts".substring(19),contents:"// Information about this file, used by the sample UI\nexport const inputSourceInfo = {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n};\n\n// Input holds a snapshot of input state\nexport default interface Input {\n // Digital input (e.g. keyboard state)\n readonly digital: {\n readonly forward: boolean;\n readonly backward: boolean;\n readonly left: boolean;\n readonly right: boolean;\n readonly up: boolean;\n readonly down: boolean;\n };\n // Analog input (e.g. mouse, touchscreen)\n readonly analog: {\n readonly x: number;\n readonly y: number;\n readonly zoom: number;\n readonly touching: boolean;\n };\n}\n\n// InputHandler is a function that, when called, returns the current Input state.\nexport type InputHandler = () => Input;\n\n// createInputHandler returns an InputHandler by attaching event handlers to the window.\nexport function createInputHandler(window: Window): InputHandler {\n const digital = {\n forward: false,\n backward: false,\n left: false,\n right: false,\n up: false,\n down: false,\n };\n const analog = {\n x: 0,\n y: 0,\n zoom: 0,\n };\n let mouseDown = false;\n\n const setDigital = (e: KeyboardEvent, value: boolean) => {\n switch (e.code) {\n case 'KeyW':\n digital.forward = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyS':\n digital.backward = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyA':\n digital.left = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'KeyD':\n digital.right = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'Space':\n digital.up = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n case 'ShiftLeft':\n case 'ControlLeft':\n case 'KeyC':\n digital.down = value;\n e.preventDefault();\n e.stopPropagation();\n break;\n }\n };\n\n window.addEventListener('keydown', (e) => setDigital(e, true));\n window.addEventListener('keyup', (e) => setDigital(e, false));\n window.addEventListener('mousedown', () => {\n mouseDown = true;\n });\n window.addEventListener('mouseup', () => {\n mouseDown = false;\n });\n window.addEventListener('mousemove', (e) => {\n mouseDown = (e.buttons & 1) !== 0;\n if (mouseDown) {\n analog.x += e.movementX;\n analog.y += e.movementY;\n }\n });\n window.addEventListener(\n 'wheel',\n (e) => {\n mouseDown = (e.buttons & 1) !== 0;\n if (mouseDown) {\n // The scroll value varies substantially between user agents / browsers.\n // Just use the sign.\n analog.zoom += Math.sign(e.deltaY);\n e.preventDefault();\n e.stopPropagation();\n }\n },\n { passive: false }\n );\n\n return () => {\n const out = {\n digital,\n analog: {\n x: analog.x,\n y: analog.y,\n zoom: 
analog.zoom,\n touching: mouseDown,\n },\n };\n // Clear the analog values, as these accumulate.\n analog.x = 0;\n analog.y = 0;\n analog.zoom = 0;\n return out;\n };\n}\n"};var p="src/sample/cameras/main.ts";let h=async e=>{let t,{canvas:n,pageState:i,gui:s}=e;if(!i.active)return;let c=function(e){let t={forward:!1,backward:!1,left:!1,right:!1,up:!1,down:!1},n={x:0,y:0,zoom:0},a=!1,i=(e,n)=>{switch(e.code){case"KeyW":t.forward=n,e.preventDefault(),e.stopPropagation();break;case"KeyS":t.backward=n,e.preventDefault(),e.stopPropagation();break;case"KeyA":t.left=n,e.preventDefault(),e.stopPropagation();break;case"KeyD":t.right=n,e.preventDefault(),e.stopPropagation();break;case"Space":t.up=n,e.preventDefault(),e.stopPropagation();break;case"ShiftLeft":case"ControlLeft":case"KeyC":t.down=n,e.preventDefault(),e.stopPropagation()}};return e.addEventListener("keydown",e=>i(e,!0)),e.addEventListener("keyup",e=>i(e,!1)),e.addEventListener("mousedown",()=>{a=!0}),e.addEventListener("mouseup",()=>{a=!1}),e.addEventListener("mousemove",e=>{(a=(1&e.buttons)!=0)&&(n.x+=e.movementX,n.y+=e.movementY)}),e.addEventListener("wheel",e=>{(a=(1&e.buttons)!=0)&&(n.zoom+=Math.sign(e.deltaY),e.preventDefault(),e.stopPropagation())},{passive:!1}),()=>{let e={digital:t,analog:{x:n.x,y:n.y,zoom:n.zoom,touching:a}};return n.x=0,n.y=0,n.zoom=0,e}}(window),m=a.R3.create(3,2,5),p={arcball:new u({position:m}),WASD:new l({position:m})},h={type:"arcball"},d=h.type;s.add(h,"type",["arcball","WASD"]).onChange(()=>{let e=h.type;p[e].matrix=p[d].matrix,d=e});let f=await navigator.gpu.requestAdapter(),g=await f.requestDevice(),v=n.getContext("webgpu"),x=window.devicePixelRatio;n.width=n.clientWidth*x,n.height=n.clientHeight*x;let b=navigator.gpu.getPreferredCanvasFormat();v.configure({device:g,format:b,alphaMode:"premultiplied"});let w=g.createBuffer({size:r.zS.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(w.getMappedRange()).set(r.zS),w.unmap();let y=g.createRenderPipeline({layout:"auto",vertex:{module:g.createShaderModule({code:o}),entryPoint:"vertex_main",buffers:[{arrayStride:r.O$,attributes:[{shaderLocation:0,offset:r.v8,format:"float32x4"},{shaderLocation:1,offset:r.Ax,format:"float32x2"}]}]},fragment:{module:g.createShaderModule({code:o}),entryPoint:"fragment_main",targets:[{format:b}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),_=g.createTexture({size:[n.width,n.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),S=g.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let C=await fetch("../assets/img/Di-3d.png"),R=await createImageBitmap(await C.blob());t=g.createTexture({size:[R.width,R.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT}),g.queue.copyExternalImageToTexture({source:R},{texture:t},[R.width,R.height])}let T=g.createSampler({magFilter:"linear",minFilter:"linear"}),P=g.createBindGroup({layout:y.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:S}},{binding:1,resource:T},{binding:2,resource:t.createView()}]}),A={colorAttachments:[{view:void 0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:_.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},V=n.width/n.height,M=a._E.perspective(2*Math.PI/5,V,1,100),E=a._E.create(),k=Date.now();requestAnimationFrame(function e(){let 
t=Date.now(),n=(t-k)/1e3;if(k=t,!i.active)return;let o=function(e){let t=p[h.type],n=t.update(e,c());return a._E.multiply(M,n,E),E}(n);g.queue.writeBuffer(S,0,o.buffer,o.byteOffset,o.byteLength),A.colorAttachments[0].view=v.getCurrentTexture().createView();let s=g.createCommandEncoder(),l=s.beginRenderPass(A);l.setPipeline(y),l.setBindGroup(0,P),l.setVertexBuffer(0,w),l.draw(r.MO),l.end(),g.queue.submit([s.finish()]),requestAnimationFrame(e)})},d=()=>(0,i.Tl)({name:"Cameras",description:"This example provides example camera implementations",gui:!0,init:h,sources:[{name:p.substring(19),contents:"import { mat4, vec3 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\nimport cubeWGSL from './cube.wgsl';\nimport { ArcballCamera, WASDCamera, cameraSourceInfo } from './camera';\nimport { createInputHandler, inputSourceInfo } from './input';\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n if (!pageState.active) {\n return;\n }\n\n // The input handler\n const inputHandler = createInputHandler(window);\n\n // The camera types\n const initialCameraPosition = vec3.create(3, 2, 5);\n const cameras = {\n arcball: new ArcballCamera({ position: initialCameraPosition }),\n WASD: new WASDCamera({ position: initialCameraPosition }),\n };\n\n // GUI parameters\n const params: { type: 'arcball' | 'WASD' } = {\n type: 'arcball',\n };\n\n // Callback handler for camera mode\n let oldCameraType = params.type;\n gui.add(params, 'type', ['arcball', 'WASD']).onChange(() => {\n // Copy the camera matrix from old to new\n const newCameraType = params.type;\n cameras[newCameraType].matrix = cameras[oldCameraType].matrix;\n oldCameraType = newCameraType;\n });\n\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: cubeWGSL,\n }),\n entryPoint: 'vertex_main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: cubeWGSL,\n }),\n entryPoint: 'fragment_main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n cullMode: 'back',\n },\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 
'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let cubeTexture: GPUTexture;\n {\n const response = await fetch('../assets/img/Di-3d.png');\n const imageBitmap = await createImageBitmap(await response.blob());\n\n cubeTexture = device.createTexture({\n size: [imageBitmap.width, imageBitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: imageBitmap },\n { texture: cubeTexture },\n [imageBitmap.width, imageBitmap.height]\n );\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n {\n binding: 1,\n resource: sampler,\n },\n {\n binding: 2,\n resource: cubeTexture.createView(),\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getModelViewProjectionMatrix(deltaTime: number) {\n const camera = cameras[params.type];\n const viewMatrix = camera.update(deltaTime, inputHandler());\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n return modelViewProjectionMatrix as Float32Array;\n }\n\n let lastFrameMS = Date.now();\n\n function frame() {\n const now = Date.now();\n const deltaTime = (now - lastFrameMS) / 1000;\n lastFrameMS = now;\n\n if (!pageState.active) {\n // Sample is no longer the active page.\n return;\n }\n\n const modelViewProjection = getModelViewProjectionMatrix(deltaTime);\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n modelViewProjection.buffer,\n modelViewProjection.byteOffset,\n modelViewProjection.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst TexturedCube: () => JSX.Element = () =>\n makeSample({\n name: 'Cameras',\n description: 'This example provides example camera implementations',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n cameraSourceInfo,\n inputSourceInfo,\n {\n name: '../../shaders/cube.wgsl',\n contents: 
cubeWGSL,\n editable: true,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default TexturedCube;\n"},s,m,{name:"../../shaders/cube.wgsl",contents:o,editable:!0},{name:"../../meshes/cube.ts",contents:n(2448).Z}],filename:p});var f=d},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,t){"use strict";t.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/webpack-dee32cb39c7d6840.js b/_next/static/chunks/webpack-b64fa792331afcf2.js similarity index 52% rename from _next/static/chunks/webpack-dee32cb39c7d6840.js rename to _next/static/chunks/webpack-b64fa792331afcf2.js index 1d140861..d960f904 100644 --- a/_next/static/chunks/webpack-dee32cb39c7d6840.js +++ b/_next/static/chunks/webpack-b64fa792331afcf2.js @@ -1 +1 @@ -!function(){"use strict";var e,t,r,n,f,a,o,c,i,u,d={},b={};function l(e){var t=b[e];if(void 0!==t)return t.exports;var r=b[e]={exports:{}},n=!0;try{d[e].call(r.exports,r,r.exports,l),n=!1}finally{n&&delete b[e]}return r.exports}l.m=d,e=[],l.O=function(t,r,n,f){if(r){f=f||0;for(var a=e.length;a>0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var o=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,f0&&e[f-1][2]>a;f--)e[f]=e[f-1];e[f]=[r,n,a];return}for(var o=1/0,f=0;f=a&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,aWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index 6d970d28..214c9f41 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer 
- WebGPU Samples

A-Buffer

See it on GitHub!

Demonstrates order independent transparency using a per-pixel linked-list of translucent fragments, with an option for limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).
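For readers of this patch: the A-buffer technique above boils down to two storage buffers. The sketch below is a hedged illustration, not the sample's code; the canvas size, NODES_PER_PIXEL, and the 16-byte node layout are assumptions, and WebGPU type declarations (e.g. @webgpu/types) are assumed to be available.

```ts
// Illustrative sketch: per-pixel linked-list storage for translucent fragments.
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter!.requestDevice();

const width = 1024;          // assumed canvas size
const height = 768;
const NODES_PER_PIXEL = 4;   // assumed average list capacity; bounds memory use
const NODE_SIZE_BYTES = 16;  // assumed node: packed color u32, depth f32, next u32, pad

// One u32 head pointer per pixel; 0xffffffff marks an empty list.
const headsBuffer = device.createBuffer({
  size: width * height * Uint32Array.BYTES_PER_ELEMENT,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});

// Shared node pool that fragment-shader invocations append to via an atomic counter.
const nodesBuffer = device.createBuffer({
  size: width * height * NODES_PER_PIXEL * NODE_SIZE_BYTES,
  usage: GPUBufferUsage.STORAGE,
});
```

Capping the node pool (rather than sizing it for worst-case depth complexity) is what makes the memory-limiting option in the description possible; a resolve pass then sorts each pixel's list by depth and blends.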

\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index 109f1a63..54565e7f 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index ddd6bcf4..99cb63d2 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on GitHub!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each step of the sort is executed by dispatching the bitonic sort shader with a workgroup containing elements/2 invocations. The GUI's Execution Information folder reports the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on GitHub!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each step of the sort is executed by dispatching the bitonic sort shader with a workgroup containing elements/2 invocations. The GUI's Execution Information folder reports the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.
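For context, the compare-and-swap network such a shader evaluates is easy to state as a CPU reference. This TypeScript sketch assumes a power-of-two input length and is not the sample's WGSL; each (k, j) pass of the nested loops corresponds to one GPU dispatch over elements/2 invocations.

```ts
// CPU reference of the bitonic sorting network (data.length must be a power of two).
function bitonicSort(data: number[]): number[] {
  const n = data.length;
  for (let k = 2; k <= n; k *= 2) {        // size of the bitonic sequences being merged
    for (let j = k / 2; j >= 1; j /= 2) {  // compare distance within this pass
      for (let i = 0; i < n; i++) {
        const partner = i ^ j;             // XOR pairing, as in typical bitonic shaders
        if (partner > i) {
          const ascending = (i & k) === 0; // direction alternates per k-sized block
          if (ascending === (data[i] > data[partner])) {
            [data[i], data[partner]] = [data[partner], data[i]];
          }
        }
      }
    }
  }
  return data;
}

console.log(bitonicSort([7, 3, 1, 8, 2, 6, 5, 4])); // [1, 2, 3, 4, 5, 6, 7, 8]
```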

\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index 38ecc306..6615f1dc 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index 4e260730..5cc3d7e1 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index ef0e9277..421e9e04 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index 7f408e32..ecd59df6 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index 2b9c285e..8d83f7b4 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on GitHub!

This example shows how to do deferred rendering with WebGPU: render geometry info to multiple render targets (the gBuffers) in a first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. The lighting is then done in a second pass, with per-fragment data read from the gBuffers, so its cost is independent of scene complexity. @@ -24,4 +24,4 @@ We also update the light positions in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G-buffer in the middle, and the albedo G-buffer on the right side of the screen. -

Deferred Rendering

See it on GitHub!

This example shows how to do deferred rendering with WebGPU: render geometry info to multiple render targets (the gBuffers) in a first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. The lighting is then done in a second pass, with per-fragment data read from the gBuffers, so its cost is independent of scene complexity. We also update the light positions in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G-buffer in the middle, and the albedo G-buffer on the right side of the screen. -
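To make the two-pass structure concrete, here is a hedged sketch of a matching G-buffer setup: two color targets plus depth written by the geometry pass, then sampled by the lighting pass. The formats and size are illustrative assumptions, not necessarily the sample's exact choices.

```ts
// Illustrative G-buffer setup for a deferred pipeline (formats and size assumed).
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter!.requestDevice();

const size = { width: 1024, height: 768 };
const usage = GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING;
const gBufferNormal = device.createTexture({ size, format: 'rgba16float', usage });
const gBufferAlbedo = device.createTexture({ size, format: 'bgra8unorm', usage });
const gBufferDepth = device.createTexture({ size, format: 'depth24plus', usage });

// Pass 1: a single render pass writes normals and albedo at once (multiple render targets).
const geometryPass: GPURenderPassDescriptor = {
  colorAttachments: [
    { view: gBufferNormal.createView(), loadOp: 'clear', storeOp: 'store',
      clearValue: { r: 0, g: 0, b: 1, a: 1 } },
    { view: gBufferAlbedo.createView(), loadOp: 'clear', storeOp: 'store',
      clearValue: { r: 0, g: 0, b: 0, a: 1 } },
  ],
  depthStencilAttachment: {
    view: gBufferDepth.createView(),
    depthClearValue: 1.0, depthLoadOp: 'clear', depthStoreOp: 'store',
  },
};
// Pass 2 binds the three views as textures and shades one full-screen
// triangle, so the lighting cost is per pixel, not per scene object.
```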

\ No newline at end of file +

\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index fd0b0ce2..ced8075e 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index 13b33d1b..34605d3e 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index f8408a39..12179778 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 0da7ceac..a26e17e4 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index b81c96d1..163df691 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 5953e7de..15fa21df 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index e356d5e1..c08ccb66 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index 7669d2aa..bcabe989 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 3c3ce419..e6fc2b9a 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on GitHub!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene, in order to demonstrate the reduction in JavaScript time spent issuing render commands. (Typically a scene like this would make use of instancing to reduce draw overhead.)"/>

Render Bundles

See it on GitHub!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene, in order to demonstrate the reduction in JavaScript time spent issuing render commands. (Typically a scene like this would make use of instancing to reduce draw overhead.)

\ No newline at end of file + of instancing to reduce draw overhead.)
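The pattern behind that description can be outlined as follows. This is a hedged sketch of the API shape, not the sample's code: Mesh, the pipeline, and the formats stand in for resources the real sample creates elsewhere.

```ts
// Record per-mesh draw commands once; replay them every frame.
interface Mesh {
  bindGroup: GPUBindGroup;
  vertexBuffer: GPUBuffer;
  vertexCount: number;
}

function recordSceneBundle(
  device: GPUDevice,
  colorFormat: GPUTextureFormat,
  pipeline: GPURenderPipeline,
  meshes: Mesh[]
): GPURenderBundle {
  const encoder = device.createRenderBundleEncoder({
    colorFormats: [colorFormat],
    depthStencilFormat: 'depth24plus', // must match the render pass it will run in
  });
  encoder.setPipeline(pipeline);
  for (const mesh of meshes) {
    encoder.setBindGroup(0, mesh.bindGroup);
    encoder.setVertexBuffer(0, mesh.vertexBuffer);
    encoder.draw(mesh.vertexCount);
  }
  return encoder.finish(); // encoding and validation happen once, here
}

// Per frame, inside a render pass with matching attachment formats:
//   passEncoder.executeBundles([sceneBundle]);
```

The JavaScript saving comes from executeBundles replaying pre-validated commands instead of re-issuing a setBindGroup/setVertexBuffer/draw triple per mesh per frame.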

\ No newline at end of file diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html index 2df8be82..302bb5a6 100644 --- a/samples/resizeCanvas.html +++ b/samples/resizeCanvas.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Resize Canvas - WebGPU Samples \ No newline at end of file + Resize Canvas - WebGPU Samples \ No newline at end of file diff --git a/samples/reversedZ.html b/samples/reversedZ.html index 062c7ae0..613d0c8a 100644 --- a/samples/reversedZ.html +++ b/samples/reversedZ.html @@ -17,7 +17,7 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ - "/>

Reversed Z

See it on GitHub!

This example shows the use of the reversed-z technique for better utilization of depth-buffer precision. The left column uses the regular method, while the right one uses the reversed-z technique. Both use depth32float as their depth-buffer format. A set of red and green planes are positioned very close to each other. Higher sets are placed further from the camera (and are scaled for better visibility). @@ -26,4 +26,4 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ -

Reversed Z

See it on GitHub!

This example shows the use of the reversed-z technique for better utilization of depth-buffer precision. The left column uses the regular method, while the right one uses the reversed-z technique. Both use depth32float as their depth-buffer format. A set of red and green planes are positioned very close to each other. Higher sets are placed further from the camera (and are scaled for better visibility).

\ No newline at end of file +
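For reference, the reversed-z setup the right column uses amounts to flipping the depth test and the projection's depth mapping. The sketch below is illustrative, assuming WebGPU type declarations are available; the matrix helper uses a plain column-major array rather than the wgpu-matrix API the samples use.

```ts
// Reversed-z pipeline state: clear depth to 0.0 and keep the *largest* depth.
const reversedZDepthStencil: GPUDepthStencilState = {
  format: 'depth32float',
  depthWriteEnabled: true,
  depthCompare: 'greater', // the regular method uses 'less' with depthClearValue 1.0
};

// Infinite-far reversed-z perspective (column-major, WebGPU clip z in [0, 1]).
function perspectiveReversedZ(fovY: number, aspect: number, near: number): Float32Array {
  const f = 1 / Math.tan(fovY / 2);
  return new Float32Array([
    f / aspect, 0, 0,    0,
    0,          f, 0,    0,
    0,          0, 0,   -1, // z column: w_clip = -z_view
    0,          0, near, 0, // w column: z_clip = near
  ]);
}
// depth = z_clip / w_clip = near / -z_view: the near plane lands at depth 1
// and infinity at depth 0, concentrating float precision where it is scarce.
```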

\ No newline at end of file diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html index 6e962756..b6942c99 100644 --- a/samples/rotatingCube.html +++ b/samples/rotatingCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Rotating Cube - WebGPU Samples \ No newline at end of file + Rotating Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html index ecc89ca1..98724344 100644 --- a/samples/samplerParameters.html +++ b/samples/samplerParameters.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on GitHub!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file + Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on GitHub!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).
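As a companion to that description, here are representative sampler variants of the kind the page compares; the descriptor fields are standard GPUSamplerDescriptor options, but the specific values are assumptions rather than the sample's exact presets.

```ts
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter!.requestDevice();

const pointSampler = device.createSampler({
  magFilter: 'nearest', minFilter: 'nearest', mipmapFilter: 'nearest',
});
const trilinearSampler = device.createSampler({
  magFilter: 'linear', minFilter: 'linear', mipmapFilter: 'linear',
});
const anisotropicSampler = device.createSampler({
  magFilter: 'linear', minFilter: 'linear', mipmapFilter: 'linear',
  maxAnisotropy: 16, // only valid when all three filters are 'linear'
});
const lodClampedSampler = device.createSampler({
  minFilter: 'linear', mipmapFilter: 'linear',
  lodMinClamp: 0, lodMaxClamp: 2, // restricts which of the 4 mip levels are used
});
```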

\ No newline at end of file diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html index dd3d09fc..1df50fdf 100644 --- a/samples/shadowMapping.html +++ b/samples/shadowMapping.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Shadow Mapping - WebGPU Samples \ No newline at end of file + Shadow Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/texturedCube.html b/samples/texturedCube.html index c37e2579..c29948bb 100644 --- a/samples/texturedCube.html +++ b/samples/texturedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Textured Cube - WebGPU Samples \ No newline at end of file + Textured Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/twoCubes.html b/samples/twoCubes.html index 18958c67..36fe340f 100644 --- a/samples/twoCubes.html +++ b/samples/twoCubes.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Two Cubes - WebGPU Samples \ No newline at end of file + Two Cubes - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploading.html b/samples/videoUploading.html index 4759deea..f9992404 100644 --- a/samples/videoUploading.html +++ b/samples/videoUploading.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading - WebGPU Samples \ No newline at end of file + Video Uploading - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html index 0c523fa4..2b29815d 100644 --- a/samples/videoUploadingWebCodecs.html +++ b/samples/videoUploadingWebCodecs.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file + Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file diff --git a/samples/worker.html b/samples/worker.html index f4987a39..1d76368b 100644 --- a/samples/worker.html +++ b/samples/worker.html @@ -10,6 +10,6 @@ } WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on GitHub!

This example shows one method of using WebGPU in a web worker and presenting to the main thread: an offscreen canvas is transferred to the worker, where all the WebGPU calls are made."/>

\ No newline at end of file + which is then transferred to the worker where all the WebGPU calls are made.
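The handoff that description outlines takes only a few lines on each side. This is a hedged sketch: the worker file name and message shape are assumptions, and the two halves live in separate files (main thread vs. worker).

```ts
// Main thread: hand the canvas to the worker; OffscreenCanvas is transferable.
const canvas = document.querySelector('canvas') as HTMLCanvasElement;
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('./worker.js', { type: 'module' });
worker.postMessage({ canvas: offscreen }, [offscreen]);

// worker.js: every WebGPU call happens here; presented frames appear on the
// on-screen canvas that was transferred.
self.onmessage = async (ev: MessageEvent<{ canvas: OffscreenCanvas }>) => {
  const adapter = await navigator.gpu.requestAdapter();
  const device = await adapter!.requestDevice();
  const context = ev.data.canvas.getContext('webgpu') as GPUCanvasContext;
  context.configure({ device, format: navigator.gpu.getPreferredCanvasFormat() });
  // ...create pipelines and render as usual...
};
```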

\ No newline at end of file