diff --git a/404.html b/404.html
index 77a3ca7e..6d5eaf7e 100644
--- a/404.html
+++ b/404.html
@@ -1,4 +1,4 @@
-404: This page could not be found

404

This page could not be found.

\ No newline at end of file
+ }

404

This page could not be found.

\ No newline at end of file
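Every rename that follows is the same mechanical change: Next.js namespaces per-page data and build manifests under a per-build ID (`/_next/data/<buildId>/…` and `/_next/static/<buildId>/…`), so a redeploy renames the entire tree even though each file is `similarity index 100%`. If that churn is unwanted, the build ID can be pinned with the documented `generateBuildId` option; a minimal sketch in `next.config.js` (the `GIT_SHA` environment variable here is a hypothetical example, not part of this repo):

```ts
// next.config.js — sketch only: pin the build ID to something deterministic
// (e.g. a commit SHA) so unchanged /_next/data files keep stable paths.
module.exports = {
  generateBuildId: async () => {
    // Hypothetical env var; any stable, deploy-unique string works.
    return process.env.GIT_SHA ?? 'dev-build';
  },
};
```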
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/A-buffer.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/A-buffer.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/A-buffer.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/A-buffer.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/animometer.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/animometer.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/animometer.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/animometer.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/bitonicSort.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/bitonicSort.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/bitonicSort.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/bitonicSort.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cameras.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cameras.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cameras.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cameras.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/computeBoids.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/computeBoids.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/computeBoids.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/computeBoids.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cornell.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cornell.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cornell.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cornell.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cubemap.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cubemap.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/cubemap.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/cubemap.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/deferredRendering.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/deferredRendering.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/deferredRendering.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/deferredRendering.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/fractalCube.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/fractalCube.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/fractalCube.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/fractalCube.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/gameOfLife.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/gameOfLife.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/gameOfLife.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/gameOfLife.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/helloTriangle.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/helloTriangle.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/helloTriangle.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/helloTriangle.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/helloTriangleMSAA.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/helloTriangleMSAA.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/helloTriangleMSAA.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/helloTriangleMSAA.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/imageBlur.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/imageBlur.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/imageBlur.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/imageBlur.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/instancedCube.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/instancedCube.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/instancedCube.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/instancedCube.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/normalMap.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/normalMap.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/normalMap.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/normalMap.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/particles.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/particles.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/particles.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/particles.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/renderBundles.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/renderBundles.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/renderBundles.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/renderBundles.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/resizeCanvas.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/resizeCanvas.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/resizeCanvas.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/resizeCanvas.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/reversedZ.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/reversedZ.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/reversedZ.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/reversedZ.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/rotatingCube.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/rotatingCube.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/rotatingCube.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/rotatingCube.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/samplerParameters.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/samplerParameters.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/samplerParameters.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/samplerParameters.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/shadowMapping.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/shadowMapping.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/shadowMapping.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/shadowMapping.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/texturedCube.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/texturedCube.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/texturedCube.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/texturedCube.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/twoCubes.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/twoCubes.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/twoCubes.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/twoCubes.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/videoUploading.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/videoUploading.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/videoUploading.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/videoUploading.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/videoUploadingWebCodecs.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/videoUploadingWebCodecs.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/videoUploadingWebCodecs.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/videoUploadingWebCodecs.json
diff --git a/_next/data/xFpWtHIgZZiYtYujEz2Rj/samples/worker.json b/_next/data/I0F9q9rpYn2lCgp-qIwJG/samples/worker.json
similarity index 100%
rename from _next/data/xFpWtHIgZZiYtYujEz2Rj/samples/worker.json
rename to _next/data/I0F9q9rpYn2lCgp-qIwJG/samples/worker.json
diff --git a/_next/static/xFpWtHIgZZiYtYujEz2Rj/_buildManifest.js b/_next/static/I0F9q9rpYn2lCgp-qIwJG/_buildManifest.js
similarity index 100%
rename from _next/static/xFpWtHIgZZiYtYujEz2Rj/_buildManifest.js
rename to _next/static/I0F9q9rpYn2lCgp-qIwJG/_buildManifest.js
diff --git a/_next/static/xFpWtHIgZZiYtYujEz2Rj/_ssgManifest.js b/_next/static/I0F9q9rpYn2lCgp-qIwJG/_ssgManifest.js
similarity index 100%
rename from _next/static/xFpWtHIgZZiYtYujEz2Rj/_ssgManifest.js
rename to _next/static/I0F9q9rpYn2lCgp-qIwJG/_ssgManifest.js
diff --git a/_next/static/assets/img/brickwall_diffuse.c9ee5359ababda94.png b/_next/static/assets/img/brickwall_diffuse.c9ee5359ababda94.png
new file mode 100644
index 00000000..35835088
Binary files /dev/null and b/_next/static/assets/img/brickwall_diffuse.c9ee5359ababda94.png differ
diff --git a/_next/static/assets/img/brickwall_height.5e7f3bd0e5c45632.png b/_next/static/assets/img/brickwall_height.5e7f3bd0e5c45632.png
new file mode 100644
index 00000000..48ab26fa
Binary files /dev/null and b/_next/static/assets/img/brickwall_height.5e7f3bd0e5c45632.png differ
diff --git a/_next/static/assets/img/brickwall_normal.12f32d2510fd6264.png b/_next/static/assets/img/brickwall_normal.12f32d2510fd6264.png
new file mode 100644
index 00000000..aa6643de
Binary files /dev/null and b/_next/static/assets/img/brickwall_normal.12f32d2510fd6264.png differ
diff --git a/_next/static/assets/img/spiral_height.0c894e7810776e93.png b/_next/static/assets/img/spiral_height.0c894e7810776e93.png
new file mode 100644
index 00000000..1f1680ff
Binary files /dev/null and b/_next/static/assets/img/spiral_height.0c894e7810776e93.png differ
diff --git a/_next/static/assets/img/spiral_normal.5cdc922342aadd02.png b/_next/static/assets/img/spiral_normal.5cdc922342aadd02.png
new file mode 100644
index 00000000..5cba15cf
Binary files /dev/null and b/_next/static/assets/img/spiral_normal.5cdc922342aadd02.png differ
diff --git a/_next/static/assets/img/toybox_height.826b323f99a3103b.png b/_next/static/assets/img/toybox_height.826b323f99a3103b.png
new file mode 100644
index 00000000..9977210b
Binary files /dev/null and b/_next/static/assets/img/toybox_height.826b323f99a3103b.png differ
diff --git a/_next/static/assets/img/toybox_normal.5758b42f35d39dd7.png b/_next/static/assets/img/toybox_normal.5758b42f35d39dd7.png
new file mode 100644
index 00000000..91bcb56a
Binary files /dev/null and b/_next/static/assets/img/toybox_normal.5758b42f35d39dd7.png differ
diff --git a/_next/static/assets/img/wood_diffuse.bfe4491cf7c50e45.png b/_next/static/assets/img/wood_diffuse.bfe4491cf7c50e45.png
new file mode 100644
index
00000000..e28e2aee Binary files /dev/null and b/_next/static/assets/img/wood_diffuse.bfe4491cf7c50e45.png differ diff --git a/_next/static/chunks/118.5795417930498362.js b/_next/static/chunks/118.5795417930498362.js new file mode 100644 index 00000000..a8b91ead --- /dev/null +++ b/_next/static/chunks/118.5795417930498362.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[118],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return d},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let c=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),c=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),d=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),g=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[h,f]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(g?v(g[1]):v(a[0].name),c&&l.current)for(l.current.appendChild(c.domElement);c.__controllers.length>0;)c.__controllers[0].remove();p&&d.current&&(p.dom.style.position="absolute",p.showPanel(1),d.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:c,stats:p});o instanceof Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),h?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(h)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:d}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},d=e=>(0,r.jsx)(c,{...e});function p(e,n){if(!e)throw Error(n)}},7118:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return T}});var r,a,i=t(6416),o=t(5671),s="struct SpaceTransformUniforms {\n projMatrix: mat4x4f,\n viewMatrix: mat4x4f,\n modelMatrix: mat4x4f,\n}\n\nstruct Uniforms_MapInfo {\n mappingType: u32,\n lightPosX: f32,\n lightPosY: f32,\n lightPosZ: f32,\n lightIntensity: f32,\n depthScale: f32,\n depthLayers: f32,\n}\n\nstruct VertexInput {\n // Shader assumes the missing 4th float is 1.0\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f,\n @location(3) vert_tan: vec3f,\n @location(4) vert_bitan: vec3f,\n}\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n // Vertex position in world space\n @location(2) posWS: vec3f,\n // Vertex position in tangent space\n @location(3) posTS: vec3f,\n // View position in tangent space\n @location(4) viewTS: vec3f,\n // Extracted components of our tbn matrix\n @location(5) tbnTS0: vec3, \n @location(6) tbnTS1: vec3,\n @location(7) tbnTS2: vec3,\n}\n\n// Uniforms\n@group(0) @binding(0) var spaceTransform : SpaceTransformUniforms;\n@group(0) @binding(1) var mapInfo: Uniforms_MapInfo;\n\n// Texture info\n@group(1) @binding(0) var textureSampler: sampler;\n@group(1) @binding(1) var diffuseTexture: texture_2d;\n@group(1) @binding(2) var normalTexture: texture_2d;\n@group(1) @binding(3) var depthTexture: texture_2d;\n\nfn parallax_uv(\n uv: vec2f, \n viewDirTS: vec3f, \n depthSample: f32,\n depthScale: f32,\n) -> vec2f {\n if (mapInfo.mappingType == 4) {\n // Perturb uv coordinates based on depth and camera direction\n let p = viewDirTS.xy * (depthSample * depthScale) / viewDirTS.z;\n return uv - p;\n }\n // Break up depth space into layers\n let depthPerLayer = 1.0 / f32(mapInfo.depthLayers);\n // Start at lowest depth\n var currentDepth = 0.0;\n let delta_uv = viewDirTS.xy * depthScale / (viewDirTS.z * mapInfo.depthLayers);\n var prev_uv = uv;\n var cur_uv = uv;\n\n var depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n var prevDepthFromTexture = depthFromTexture;\n var prevCurrentDepth = currentDepth;\n for (var i: u32 = 0; i < 32; i++) {\n currentDepth += depthPerLayer;\n prev_uv = cur_uv;\n cur_uv -= delta_uv;\n depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n // Determine whether current depth is greater than depth map\n // Once we reach a certain threshold, we stop updating cur_uv\n cur_uv = select(cur_uv, prev_uv, depthFromTexture < currentDepth);\n prevDepthFromTexture = select(depthFromTexture, prevDepthFromTexture, prevDepthFromTexture < currentDepth);\n prevCurrentDepth = select(currentDepth, prevCurrentDepth, prevDepthFromTexture < currentDepth);\n }\n return cur_uv;\n}\n\nfn when_greater(v1: f32, v2: f32) 
-> f32 {\n return max(sign(v1 - v2), 0.0);\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n // Create the Model to View Matrix\n let MV = spaceTransform.viewMatrix * spaceTransform.modelMatrix;\n // Create the Model to View to Projection Matrix\n let MVP = spaceTransform.projMatrix * MV;\n \n // Get Clip space transforms and pass through values out of the way\n output.Position = MVP * input.position;\n output.uv = input.uv;\n output.normal = input.normal;\n\n // Multiply pos by modelMatrix to get the vertex/fragment's position in world space\n output.posWS = vec3f((spaceTransform.modelMatrix * input.position).xyz);\n \n var MV3x3 = mat3x3f(\n MV[0].xyz,\n MV[1].xyz,\n MV[2].xyz\n );\n\n // Get unit vectors of normal, tangent, and bitangents in model space\n let vertexTangent = normalize(input.vert_tan);\n let vertexBitangent = normalize(input.vert_bitan);\n let vertexNormal = normalize(input.normal);\n\n // Convert tbn unit vectors to mv space for a model view tbn\n var tbnTS = transpose(\n MV3x3 * mat3x3f(\n vertexTangent,\n vertexBitangent,\n vertexNormal\n )\n );\n // Condense to vec3s so they can be passed to fragment shader\n output.tbnTS0 = tbnTS[0];\n output.tbnTS1 = tbnTS[1];\n output.tbnTS2 = tbnTS[2];\n\n // Get the tangent space position of the vertex\n output.posTS = tbnTS * (MV * input.position).xyz;\n // Get the tangent space position of the camera view\n output.viewTS = tbnTS * vec3f(0.0, 0.0, 0.0);\n\n return output;\n}\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n // Reconstruct tbnTS\n let tbnTS = mat3x3f(\n input.tbnTS0,\n input.tbnTS1,\n input.tbnTS2,\n );\n\n // Get direction of view in tangent space\n let viewDirTS = normalize(input.viewTS - input.posTS);\n\n // Get position, direction, and distance of light in tangent space (no need to multiply by model matrix as there is no model)\n let lightPosVS = spaceTransform.viewMatrix * vec4f(mapInfo.lightPosX, mapInfo.lightPosY, mapInfo.lightPosZ, 1.0);\n let lightPosTS = tbnTS * lightPosVS.xyz;\n let lightDirTS = normalize(lightPosTS - input.posTS);\n let lightDistanceTS = distance(input.posTS, lightPosTS);\n\n let depthMap = textureSample(depthTexture, textureSampler, input.uv); \n\n let uv = select(\n parallax_uv(input.uv, viewDirTS, depthMap.r, mapInfo.depthScale),\n input.uv,\n mapInfo.mappingType < 4\n );\n\n // Get values from textures\n let diffuseMap = textureSample(diffuseTexture, textureSampler, uv);\n let normalMap = textureSample(normalTexture, textureSampler, uv);\n\n // Get normal in tangent space\n let normalTS = normalize((normalMap.xyz * 2.0) - 1.0);\n \n // Calculate diffusion lighting\n let lightColorIntensity = vec3f(255.0, 255.0, 255.0) * mapInfo.lightIntensity;\n //How similar is the normal to the lightDirection\n let diffuseStrength = clamp(\n dot(normalTS, lightDirTS), 0.0, 1.0\n );\n // Strenght inversely proportional to square of distance from light\n let diffuseLight = (lightColorIntensity * diffuseStrength) / (lightDistanceTS * lightDistanceTS);\n\n switch (mapInfo.mappingType) {\n // Output the diffuse texture\n case 0: {\n return vec4f(diffuseMap.rgb, 1.0);\n }\n // Output the normal map\n case 1: {\n return vec4f(normalMap.rgb, 1.0);\n }\n // Output the height map\n case 2: {\n return vec4f(depthMap.rgb, 1.0);\n }\n default: {\n return vec4f(diffuseMap.rgb * diffuseLight, 1.0);\n }\n }\n}";let l=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=arguments.length>3&&void 
0!==arguments[3]&&arguments[3],a=t?GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE:GPUBufferUsage.VERTEX,i=r?GPUBufferUsage.INDEX|GPUBufferUsage.STORAGE:GPUBufferUsage.INDEX,o=e.createBuffer({size:n.vertices.byteLength,usage:a,mappedAtCreation:!0});new Float32Array(o.getMappedRange()).set(n.vertices),o.unmap();let s=e.createBuffer({size:n.indices.byteLength,usage:i,mappedAtCreation:!0});return n.indices.byteLength===n.indices.length*Uint16Array.BYTES_PER_ELEMENT?new Uint16Array(s.getMappedRange()).set(n.indices):new Uint32Array(s.getMappedRange()).set(n.indices),s.unmap(),{vertexBuffer:o,indexBuffer:s,indexCount:n.indices.length}},u=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+0,3);return i.R3.fromValues(t[0],t[1],t[2])},c=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+6*Float32Array.BYTES_PER_ELEMENT,2);return i.K4.fromValues(t[0],t[1])},d=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1;r=Math.floor(r),a=Math.floor(a),o=Math.floor(o);let s=[],l=[],u=0,c=(e,n,t,r,a,o,c,d,p,m)=>{let g=o/p,h=c/m,f=o/2,x=c/2,v=d/2,b=p+1,y=m+1,S=0,T=i.R3.create(),w=i.R3.create();for(let P=0;P0?1:-1,l.push(...w),l.push(U/p),l.push(1-P/m),S+=1}}for(let M=0;M0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,o=arguments.length>6&&void 0!==arguments[6]?arguments[6]:"uint16",{vertices:s,indices:l}=d(e,n,t,r,a,i),u=8*Float32Array.BYTES_PER_ELEMENT,c="uint16"===o?new Uint16Array(l):new Uint32Array(l);return{vertices:new Float32Array(s),indices:c,vertexStride:u}},m=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,s=p(e,n,t,r,a,o),l=s.vertexStride/Float32Array.BYTES_PER_ELEMENT,d=s.vertices.length/l,m=Array(d),g=Array(d),h=Array(d);for(let f=0;f{let s=[];for(let l=0;l{let n=e.split("x"),t=parseInt(n[0].replace(/[^0-9]/g,""))/8,r=t*(void 0!==n[1]?parseInt(n[1]):1);return r},f=e=>{let n=e.reduce((e,n,t)=>{let r={shaderLocation:t,offset:e.arrayStride,format:n},a=e.arrayStride+h(n),i={attributes:[...e.attributes,r],arrayStride:a};return i},{attributes:[],arrayStride:0}),t={arrayStride:n.arrayStride,attributes:n.attributes};return t},x=function(e,n,t,r,a,i,o){let s=arguments.length>7&&void 0!==arguments[7]&&arguments[7],l=arguments.length>8&&void 0!==arguments[8]?arguments[8]:"triangle-list",u=arguments.length>9&&void 
0!==arguments[9]?arguments[9]:"back",c={label:"".concat(n,".pipeline"),layout:e.createPipelineLayout({label:"".concat(n,".pipelineLayout"),bindGroupLayouts:t}),vertex:{module:e.createShaderModule({label:"".concat(n,".vertexShader"),code:r}),entryPoint:"vertexMain",buffers:0!==a.length?[f(a)]:[]},fragment:{module:e.createShaderModule({label:"".concat(n,".fragmentShader"),code:i}),entryPoint:"fragmentMain",targets:[{format:o}]},primitive:{topology:l,cullMode:u}};return s&&(c.depthStencil={depthCompare:"less",depthWriteEnabled:!0,format:"depth24plus"}),e.createRenderPipeline(c)},v=(e,n)=>{let t=e.createTexture({size:[n.width,n.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});return e.queue.copyExternalImageToTexture({source:n},{texture:t},[n.width,n.height]),t};var b="src/sample/normalMap/main.ts";(r=a||(a={}))[r.Spiral=0]="Spiral",r[r.Toybox=1]="Toybox",r[r.BrickWall=2]="BrickWall";let y=async e=>{let n,r,o,u,c,d,p,h,{canvas:f,pageState:b,gui:y}=e,S=await navigator.gpu.requestAdapter(),T=await S.requestDevice();if(!b.active)return;let w=f.getContext("webgpu"),P=window.devicePixelRatio;f.width=f.clientWidth*P,f.height=f.clientHeight*P;let B=navigator.gpu.getPreferredCanvasFormat();w.configure({device:T,format:B,alphaMode:"premultiplied"});let U={"Bump Mode":"Normal Map",cameraPosX:0,cameraPosY:.8,cameraPosZ:-1.4,lightPosX:1.7,lightPosY:.7,lightPosZ:-1.9,lightIntensity:.02,depthScale:.05,depthLayers:16,Texture:"Spiral","Reset Light"(){}},G=T.createTexture({size:[f.width,f.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),M=T.createBuffer({size:256,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),E=T.createBuffer({size:7*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let V=await fetch(new t.U(t(3765)).toString()),A=await createImageBitmap(await V.blob());n=v(T,A)}{let F=await fetch(new t.U(t(6465)).toString()),_=await createImageBitmap(await F.blob());r=v(T,_)}{let R=await fetch(new t.U(t(2146)).toString()),L=await createImageBitmap(await R.blob());o=v(T,L)}{let D=await fetch(new t.U(t(2283)).toString()),I=await createImageBitmap(await D.blob());u=v(T,I)}{let C=await fetch(new t.U(t(5784)).toString()),N=await createImageBitmap(await C.blob());c=v(T,N)}{let j=await fetch(new t.U(t(7596)).toString()),Y=await createImageBitmap(await j.blob());d=v(T,Y)}{let O=await fetch(new t.U(t(4334)).toString()),X=await createImageBitmap(await O.blob());p=v(T,X)}{let z=await fetch(new t.U(t(7669)).toString()),k=await createImageBitmap(await z.blob());h=v(T,k)}let H=T.createSampler({magFilter:"linear",minFilter:"linear"}),q={colorAttachments:[{view:void 0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:G.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},W=l(T,m(1,1,1)),Z=g([0,1],[GPUShaderStage.VERTEX|GPUShaderStage.FRAGMENT,GPUShaderStage.FRAGMENT|GPUShaderStage.VERTEX],["buffer","buffer"],[{type:"uniform"},{type:"uniform"}],[[{buffer:M},{buffer:E}]],"Frame",T),$=g([0,1,2,3],[GPUShaderStage.FRAGMENT],["sampler","texture","texture","texture"],[{type:"filtering"},{sampleType:"float"},{sampleType:"float"},{sampleType:"float"}],[[H,n.createView(),r.createView(),o.createView()],[H,n.createView(),u.createView(),c.createView()],[H,d.createView(),p.createView(),h.createView()]],"Surface",T),K=f.width/f.height,J=i._E.perspective(2*Math.PI/5,K,.1,10),Q=()=>{switch(U["Bump Mode"]){case"Diffuse Texture":return 
0;case"Normal Texture":return 1;case"Depth Texture":return 2;case"Normal Map":return 3;case"Parallax Scale":return 4;case"Steep Parallax":return 5}},ee=x(T,"NormalMappingRender",[Z.bindGroupLayout,$.bindGroupLayout],s,["float32x3","float32x3","float32x2","float32x3","float32x3"],s,B,!0),en=0,et=()=>{en=a[U.Texture]};y.add(U,"Bump Mode",["Diffuse Texture","Normal Texture","Depth Texture","Normal Map","Parallax Scale","Steep Parallax"]),y.add(U,"Texture",["Spiral","Toybox","BrickWall"]).onChange(et);let er=y.addFolder("Light"),ea=y.addFolder("Depth");er.add(U,"Reset Light").onChange(()=>{ei.setValue(1.7),eo.setValue(.7),es.setValue(-1.9),el.setValue(.02)});let ei=er.add(U,"lightPosX",-5,5).step(.1),eo=er.add(U,"lightPosY",-5,5).step(.1),es=er.add(U,"lightPosZ",-5,5).step(.1),el=er.add(U,"lightIntensity",0,.1).step(.002);ea.add(U,"depthScale",0,.1).step(.01),ea.add(U,"depthLayers",1,32).step(1),requestAnimationFrame(function e(){if(!b.active)return;let n=i._E.lookAt([U.cameraPosX,U.cameraPosY,U.cameraPosZ],[0,0,0],[0,1,0]),t=function(){let e=i._E.create();i._E.identity(e);let n=Date.now()/1e3;return i._E.rotateY(e,-.5*n,e),e}(),r=new Float32Array([...J,...n,...t]),a=Q();T.queue.writeBuffer(M,0,r.buffer,r.byteOffset,r.byteLength),T.queue.writeBuffer(E,0,new Uint32Array([a])),T.queue.writeBuffer(E,4,new Float32Array([U.lightPosX,U.lightPosY,U.lightPosZ,U.lightIntensity,U.depthScale,U.depthLayers])),q.colorAttachments[0].view=w.getCurrentTexture().createView();let o=T.createCommandEncoder(),s=o.beginRenderPass(q);s.setPipeline(ee),s.setBindGroup(0,Z.bindGroups[0]),s.setBindGroup(1,$.bindGroups[en]),s.setVertexBuffer(0,W.vertexBuffer),s.setIndexBuffer(W.indexBuffer,"uint16"),s.drawIndexed(W.indexCount),s.end(),T.queue.submit([o.finish()]),requestAnimationFrame(e)})},S=()=>(0,o.Tl)({name:"Normal Mapping",description:"This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. 
Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.",gui:!0,init:y,sources:[{name:b.substring(21),contents:"import { mat4 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport normalMapWGSL from './normalMap.wgsl';\nimport { createMeshRenderable } from '../../meshes/mesh';\nimport { createBoxMeshWithTangents } from '../../meshes/box';\nimport {\n createBindGroupDescriptor,\n create3DRenderPipeline,\n createTextureFromImage,\n} from './utils';\n\nconst MAT4X4_BYTES = 64;\nenum TextureAtlas {\n Spiral,\n Toybox,\n BrickWall,\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n interface GUISettings {\n 'Bump Mode':\n | 'Diffuse Texture'\n | 'Normal Texture'\n | 'Depth Texture'\n | 'Normal Map'\n | 'Parallax Scale'\n | 'Steep Parallax';\n cameraPosX: number;\n cameraPosY: number;\n cameraPosZ: number;\n lightPosX: number;\n lightPosY: number;\n lightPosZ: number;\n lightIntensity: number;\n depthScale: number;\n depthLayers: number;\n Texture: string;\n 'Reset Light': () => void;\n }\n\n const settings: GUISettings = {\n 'Bump Mode': 'Normal Map',\n cameraPosX: 0.0,\n cameraPosY: 0.8,\n cameraPosZ: -1.4,\n lightPosX: 1.7,\n lightPosY: 0.7,\n lightPosZ: -1.9,\n lightIntensity: 0.02,\n depthScale: 0.05,\n depthLayers: 16,\n Texture: 'Spiral',\n 'Reset Light': () => {\n return;\n },\n };\n\n // Create normal mapping resources and pipeline\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBuffer = device.createBuffer({\n // Buffer holding projection, view, and model matrices plus padding bytes\n size: MAT4X4_BYTES * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const mapMethodBuffer = device.createBuffer({\n // Buffer holding mapping type, light uniforms, and depth uniforms\n size: Float32Array.BYTES_PER_ELEMENT * 7,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Fetch the image and upload it into a GPUTexture.\n let woodDiffuseTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/wood_diffuse.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n woodDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralNormalTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/spiral_normal.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let spiralHeightTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/spiral_height.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n spiralHeightTexture = 
createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxNormalTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/toybox_normal.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n toyboxNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let toyboxHeightTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/toybox_height.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n toyboxHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallDiffuseTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/brickwall_diffuse.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallDiffuseTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallNormalTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/brickwall_normal.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallNormalTexture = createTextureFromImage(device, imageBitmap);\n }\n\n let brickwallHeightTexture: GPUTexture;\n {\n const response = await fetch(\n new URL(\n '../../../assets/img/brickwall_height.png',\n import.meta.url\n ).toString()\n );\n const imageBitmap = await createImageBitmap(await response.blob());\n brickwallHeightTexture = createTextureFromImage(device, imageBitmap);\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const box = createMeshRenderable(\n device,\n createBoxMeshWithTangents(1.0, 1.0, 1.0)\n );\n\n // Uniform bindGroups and bindGroupLayout\n const frameBGDescriptor = createBindGroupDescriptor(\n [0, 1],\n [\n GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,\n ],\n ['buffer', 'buffer'],\n [{ type: 'uniform' }, { type: 'uniform' }],\n [[{ buffer: uniformBuffer }, { buffer: mapMethodBuffer }]],\n 'Frame',\n device\n );\n\n // Texture bindGroups and bindGroupLayout\n const surfaceBGDescriptor = createBindGroupDescriptor(\n [0, 1, 2, 3],\n [GPUShaderStage.FRAGMENT],\n ['sampler', 'texture', 'texture', 'texture'],\n [\n { type: 'filtering' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n ],\n // Multiple bindgroups that accord to the layout defined above\n [\n [\n sampler,\n woodDiffuseTexture.createView(),\n spiralNormalTexture.createView(),\n spiralHeightTexture.createView(),\n ],\n [\n sampler,\n woodDiffuseTexture.createView(),\n toyboxNormalTexture.createView(),\n toyboxHeightTexture.createView(),\n ],\n [\n sampler,\n brickwallDiffuseTexture.createView(),\n brickwallNormalTexture.createView(),\n brickwallHeightTexture.createView(),\n ],\n ],\n 'Surface',\n device\n );\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 
* Math.PI) / 5,\n aspect,\n 0.1,\n 10.0\n ) as Float32Array;\n\n function getViewMatrix() {\n return mat4.lookAt(\n [settings.cameraPosX, settings.cameraPosY, settings.cameraPosZ],\n [0, 0, 0],\n [0, 1, 0]\n );\n }\n\n function getModelMatrix() {\n const modelMatrix = mat4.create();\n mat4.identity(modelMatrix);\n const now = Date.now() / 1000;\n mat4.rotateY(modelMatrix, now * -0.5, modelMatrix);\n return modelMatrix;\n }\n\n // Change the model mapping type\n const getMappingType = (): number => {\n switch (settings['Bump Mode']) {\n case 'Diffuse Texture':\n return 0;\n case 'Normal Texture':\n return 1;\n case 'Depth Texture':\n return 2;\n case 'Normal Map':\n return 3;\n case 'Parallax Scale':\n return 4;\n case 'Steep Parallax':\n return 5;\n }\n };\n\n const texturedCubePipeline = create3DRenderPipeline(\n device,\n 'NormalMappingRender',\n [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],\n normalMapWGSL,\n // Position, normal uv tangent bitangent\n ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],\n normalMapWGSL,\n presentationFormat,\n true\n );\n\n let currentSurfaceBindGroup = 0;\n const onChangeTexture = () => {\n currentSurfaceBindGroup = TextureAtlas[settings.Texture];\n };\n\n gui.add(settings, 'Bump Mode', [\n 'Diffuse Texture',\n 'Normal Texture',\n 'Depth Texture',\n 'Normal Map',\n 'Parallax Scale',\n 'Steep Parallax',\n ]);\n gui\n .add(settings, 'Texture', ['Spiral', 'Toybox', 'BrickWall'])\n .onChange(onChangeTexture);\n const lightFolder = gui.addFolder('Light');\n const depthFolder = gui.addFolder('Depth');\n lightFolder.add(settings, 'Reset Light').onChange(() => {\n lightPosXController.setValue(1.7);\n lightPosYController.setValue(0.7);\n lightPosZController.setValue(-1.9);\n lightIntensityController.setValue(0.02);\n });\n const lightPosXController = lightFolder\n .add(settings, 'lightPosX', -5, 5)\n .step(0.1);\n const lightPosYController = lightFolder\n .add(settings, 'lightPosY', -5, 5)\n .step(0.1);\n const lightPosZController = lightFolder\n .add(settings, 'lightPosZ', -5, 5)\n .step(0.1);\n const lightIntensityController = lightFolder\n .add(settings, 'lightIntensity', 0.0, 0.1)\n .step(0.002);\n depthFolder.add(settings, 'depthScale', 0.0, 0.1).step(0.01);\n depthFolder.add(settings, 'depthLayers', 1, 32).step(1);\n\n function frame() {\n if (!pageState.active) return;\n\n // Write to normal map shader\n const viewMatrix = getViewMatrix();\n\n const modelMatrix = getModelMatrix();\n\n const matrices = new Float32Array([\n ...projectionMatrix,\n ...viewMatrix,\n ...modelMatrix,\n ]);\n\n const mappingType = getMappingType();\n\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n matrices.buffer,\n matrices.byteOffset,\n matrices.byteLength\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 0,\n new Uint32Array([mappingType])\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 4,\n new Float32Array([\n settings.lightPosX,\n settings.lightPosY,\n settings.lightPosZ,\n settings.lightIntensity,\n settings.depthScale,\n settings.depthLayers,\n ])\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n // Draw textured Cube\n passEncoder.setPipeline(texturedCubePipeline);\n passEncoder.setBindGroup(0, frameBGDescriptor.bindGroups[0]);\n passEncoder.setBindGroup(\n 1,\n surfaceBGDescriptor.bindGroups[currentSurfaceBindGroup]\n );\n 
passEncoder.setVertexBuffer(0, box.vertexBuffer);\n passEncoder.setIndexBuffer(box.indexBuffer, 'uint16');\n passEncoder.drawIndexed(box.indexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst NormalMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Normal Mapping',\n description:\n 'This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './normalMap.wgsl',\n contents: normalMapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/box.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/box.ts').default,\n },\n {\n name: '../../meshes/mesh.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/mesh.ts').default,\n },\n {\n name: './utils.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./utils.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default NormalMapping;\n"},{name:"./normalMap.wgsl",contents:s,editable:!0},{name:"../../meshes/box.ts",contents:t(3583).Z},{name:"../../meshes/mesh.ts",contents:t(3150).Z},{name:"./utils.ts",contents:t(1146).Z}],filename:b});var T=S},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},3583:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\nimport { getMeshPosAtIndex, getMeshUVAtIndex, Mesh } from './mesh';\n\nexport interface BoxMesh extends Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n//// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/BoxGeometry.js\n//// Presumes vertex buffer alignment of verts, normals, and uvs\nconst createBoxGeometry = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n) => {\n widthSegments = Math.floor(widthSegments);\n heightSegments = Math.floor(heightSegments);\n depthSegments = Math.floor(depthSegments);\n\n const indices = [];\n const vertNormalUVBuffer = [];\n\n let numVertices = 0;\n\n const buildPlane = (\n u: 0 | 1 | 2,\n v: 0 | 1 | 2,\n w: 0 | 1 | 2,\n udir: -1 | 1,\n vdir: -1 | 1,\n planeWidth: number,\n planeHeight: number,\n planeDepth: number,\n xSections: number,\n ySections: number\n ) => {\n const segmentWidth = planeWidth / xSections;\n const segmentHeight = planeHeight / ySections;\n\n const widthHalf = planeWidth / 2;\n const heightHalf = planeHeight / 2;\n const depthHalf = planeDepth / 2;\n\n const gridX1 = xSections + 1;\n const gridY1 = ySections + 1;\n\n let vertexCounter = 0;\n\n const vertex = vec3.create();\n const normal = vec3.create();\n for (let iy = 0; iy < gridY1; iy++) {\n const y = iy * segmentHeight - heightHalf;\n\n for (let ix = 0; ix < gridX1; ix++) {\n const x = ix * segmentWidth - widthHalf;\n\n //Calculate plane vertices\n vertex[u] = x * udir;\n vertex[v] = y * vdir;\n vertex[w] = depthHalf;\n vertNormalUVBuffer.push(...vertex);\n\n 
//Caclulate normal\n normal[u] = 0;\n normal[v] = 0;\n normal[w] = planeDepth > 0 ? 1.0 : -1.0;\n vertNormalUVBuffer.push(...normal);\n\n //Calculate uvs\n vertNormalUVBuffer.push(ix / xSections);\n vertNormalUVBuffer.push(1 - iy / ySections);\n\n vertexCounter += 1;\n }\n }\n\n for (let iy = 0; iy < ySections; iy++) {\n for (let ix = 0; ix < xSections; ix++) {\n const a = numVertices + ix + gridX1 * iy;\n const b = numVertices + ix + gridX1 * (iy + 1);\n const c = numVertices + (ix + 1) + gridX1 * (iy + 1);\n const d = numVertices + (ix + 1) + gridX1 * iy;\n\n //Push vertex indices\n //6 indices for each face\n indices.push(a, b, d);\n indices.push(b, c, d);\n\n numVertices += vertexCounter;\n }\n }\n };\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n -1,\n -1,\n depth,\n height,\n width,\n depthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n 1,\n -1,\n depth,\n height,\n -width,\n depthSegments,\n heightSegments\n );\n\n //Bottom face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n 1,\n width,\n depth,\n height,\n widthSegments,\n depthSegments\n );\n\n //Top face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n -1,\n width,\n depth,\n -height,\n widthSegments,\n depthSegments\n );\n\n //Side faces\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n 1,\n -1,\n width,\n height,\n depth,\n widthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n -1,\n -1,\n width,\n height,\n -depth,\n widthSegments,\n heightSegments\n );\n\n return {\n vertices: vertNormalUVBuffer,\n indices: indices,\n };\n};\n\ntype IndexFormat = 'uint16' | 'uint32';\n\n// Box mesh code ported from threejs, with addition of indexFormat specifier for vertex pulling\nexport const createBoxMesh = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0,\n indexFormat: IndexFormat = 'uint16'\n): Mesh => {\n const { vertices, indices } = createBoxGeometry(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const vertexStride = 8 * Float32Array.BYTES_PER_ELEMENT; //calculateVertexStride(vertexProperties);\n\n const indicesArray =\n indexFormat === 'uint16'\n ? 
new Uint16Array(indices)\n : new Uint32Array(indices);\n\n return {\n vertices: new Float32Array(vertices),\n indices: indicesArray,\n vertexStride: vertexStride,\n };\n};\n\nexport const createBoxMeshWithTangents = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n): Mesh => {\n const mesh = createBoxMesh(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const originalStrideElements =\n mesh.vertexStride / Float32Array.BYTES_PER_ELEMENT;\n\n const vertexCount = mesh.vertices.length / originalStrideElements;\n\n const tangents = new Array(vertexCount);\n const bitangents = new Array(vertexCount);\n const counts = new Array(vertexCount);\n for (let i = 0; i < vertexCount; i++) {\n tangents[i] = [0, 0, 0];\n bitangents[i] = [0, 0, 0];\n counts[i] = 0;\n }\n\n for (let i = 0; i < mesh.indices.length; i += 3) {\n const [idx1, idx2, idx3] = [\n mesh.indices[i],\n mesh.indices[i + 1],\n mesh.indices[i + 2],\n ];\n\n const [pos1, pos2, pos3] = [\n getMeshPosAtIndex(mesh, idx1),\n getMeshPosAtIndex(mesh, idx2),\n getMeshPosAtIndex(mesh, idx3),\n ];\n\n const [uv1, uv2, uv3] = [\n getMeshUVAtIndex(mesh, idx1),\n getMeshUVAtIndex(mesh, idx2),\n getMeshUVAtIndex(mesh, idx3),\n ];\n\n const edge1 = vec3.sub(pos2, pos1);\n const edge2 = vec3.sub(pos3, pos1);\n const deltaUV1 = vec3.sub(uv2, uv1);\n const deltaUV2 = vec3.sub(uv3, uv1);\n\n // Edge of a triangle moves in both u and v direction (2d)\n // deltaU * tangent vector + deltav * bitangent\n // Manipulating the data into matrices, we get an equation\n\n const constantVal =\n 1.0 / (deltaUV1[0] * deltaUV2[1] - deltaUV1[1] * deltaUV2[0]);\n\n const tangent = [\n constantVal * (deltaUV2[1] * edge1[0] - deltaUV1[1] * edge2[0]),\n constantVal * (deltaUV2[1] * edge1[1] - deltaUV1[1] * edge2[1]),\n constantVal * (deltaUV2[1] * edge1[2] - deltaUV1[1] * edge2[2]),\n ];\n\n const bitangent = [\n constantVal * (-deltaUV2[0] * edge1[0] + deltaUV1[0] * edge2[0]),\n constantVal * (-deltaUV2[0] * edge1[1] + deltaUV1[0] * edge2[1]),\n constantVal * (-deltaUV2[0] * edge1[2] + deltaUV1[0] * edge2[2]),\n ];\n\n //Accumulate tangents and bitangents\n tangents[idx1] = vec3.add(tangents[idx1], tangent);\n bitangents[idx1] = vec3.add(bitangents[idx1], bitangent);\n tangents[idx2] = vec3.add(tangents[idx2], tangent);\n bitangents[idx2] = vec3.add(bitangents[idx2], bitangent);\n tangents[idx3] = vec3.add(tangents[idx3], tangent);\n bitangents[idx3] = vec3.add(bitangents[idx3], bitangent);\n\n //Increment index count\n counts[idx1]++;\n counts[idx2]++;\n counts[idx3]++;\n }\n\n for (let i = 0; i < tangents.length; i++) {\n tangents[i] = vec3.divScalar(tangents[i], counts[i]);\n bitangents[i] = vec3.divScalar(bitangents[i], counts[i]);\n }\n\n const newStrideElements = 14;\n const wTangentArray = new Float32Array(vertexCount * newStrideElements);\n\n for (let i = 0; i < vertexCount; i++) {\n //Copy original vertex data (pos, normal uv)\n wTangentArray.set(\n //Get the original vertex [8 elements] (3 ele pos, 3 ele normal, 2 ele uv)\n mesh.vertices.subarray(\n i * originalStrideElements,\n (i + 1) * originalStrideElements\n ),\n //And put it at the proper location in the new array [14 bytes = 8 og + 6 empty]\n i * newStrideElements\n );\n //For each vertex, place tangent after originalStride\n wTangentArray.set(\n tangents[i],\n i * newStrideElements + originalStrideElements\n );\n //Place bitangent after 3 elements of tangent\n wTangentArray.set(\n bitangents[i],\n i 
* newStrideElements + originalStrideElements + 3\n );\n }\n\n return {\n vertices: wTangentArray,\n indices: mesh.indices,\n vertexStride: mesh.vertexStride + Float32Array.BYTES_PER_ELEMENT * 3 * 2,\n };\n};\n"},3150:function(e,n){"use strict";n.Z="import { vec3, vec2 } from 'wgpu-matrix';\n\n// Defines what to pass to pipeline to render mesh\nexport interface Renderable {\n vertexBuffer: GPUBuffer;\n indexBuffer: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nexport interface Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n/**\n * @param {GPUDevice} device - A valid GPUDevice.\n * @param {Mesh} mesh - An indexed triangle-list mesh, containing its vertices, indices, and vertexStride (number of elements per vertex).\n * @param {boolean} storeVertices - A boolean flag indicating whether the vertexBuffer should be available to use as a storage buffer.\n * @returns {boolean} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createMeshRenderable = (\n device: GPUDevice,\n mesh: Mesh,\n storeVertices = false,\n storeIndices = false\n): Renderable => {\n // Define buffer usage\n const vertexBufferUsage = storeVertices\n ? GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.VERTEX;\n const indexBufferUsage = storeIndices\n ? GPUBufferUsage.INDEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.INDEX;\n\n // Create vertex and index buffers\n const vertexBuffer = device.createBuffer({\n size: mesh.vertices.byteLength,\n usage: vertexBufferUsage,\n mappedAtCreation: true,\n });\n new Float32Array(vertexBuffer.getMappedRange()).set(mesh.vertices);\n vertexBuffer.unmap();\n\n const indexBuffer = device.createBuffer({\n size: mesh.indices.byteLength,\n usage: indexBufferUsage,\n mappedAtCreation: true,\n });\n\n // Determine whether index buffer is indices are in uint16 or uint32 format\n if (\n mesh.indices.byteLength ===\n mesh.indices.length * Uint16Array.BYTES_PER_ELEMENT\n ) {\n new Uint16Array(indexBuffer.getMappedRange()).set(mesh.indices);\n } else {\n new Uint32Array(indexBuffer.getMappedRange()).set(mesh.indices);\n }\n\n indexBuffer.unmap();\n\n return {\n vertexBuffer,\n indexBuffer,\n indexCount: mesh.indices.length,\n };\n};\n\nexport const getMeshPosAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 0,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshNormalAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 3 * Float32Array.BYTES_PER_ELEMENT,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshUVAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 6 * Float32Array.BYTES_PER_ELEMENT,\n 2\n );\n return vec2.fromValues(arr[0], arr[1]);\n};\n"},1146:function(e,n){"use strict";n.Z="type BindGroupBindingLayout =\n | GPUBufferBindingLayout\n | GPUTextureBindingLayout\n | GPUSamplerBindingLayout\n | GPUStorageTextureBindingLayout\n | GPUExternalTextureBindingLayout;\n\nexport type BindGroupsObjectsAndLayout = {\n bindGroups: GPUBindGroup[];\n bindGroupLayout: GPUBindGroupLayout;\n};\n\ntype ResourceTypeName =\n | 'buffer'\n | 'texture'\n | 'sampler'\n | 'externalTexture'\n | 'storageTexture';\n\n/**\n * @param {number[]} bindings - The binding value of each resource 
in the bind group.\n * @param {number[]} visibilities - The GPUShaderStage visibility of the resource at the corresponding index.\n * @param {ResourceTypeName[]} resourceTypes - The resourceType at the corresponding index.\n * @returns {BindGroupsObjectsAndLayout} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createBindGroupDescriptor = (\n bindings: number[],\n visibilities: number[],\n resourceTypes: ResourceTypeName[],\n resourceLayouts: BindGroupBindingLayout[],\n resources: GPUBindingResource[][],\n label: string,\n device: GPUDevice\n): BindGroupsObjectsAndLayout => {\n // Create layout of each entry within a bindGroup\n const layoutEntries: GPUBindGroupLayoutEntry[] = [];\n for (let i = 0; i < bindings.length; i++) {\n layoutEntries.push({\n binding: bindings[i],\n visibility: visibilities[i % visibilities.length],\n [resourceTypes[i]]: resourceLayouts[i],\n });\n }\n\n // Apply entry layouts to bindGroupLayout\n const bindGroupLayout = device.createBindGroupLayout({\n label: `${label}.bindGroupLayout`,\n entries: layoutEntries,\n });\n\n // Create bindGroups that conform to the layout\n const bindGroups: GPUBindGroup[] = [];\n for (let i = 0; i < resources.length; i++) {\n const groupEntries: GPUBindGroupEntry[] = [];\n for (let j = 0; j < resources[0].length; j++) {\n groupEntries.push({\n binding: j,\n resource: resources[i][j],\n });\n }\n const newBindGroup = device.createBindGroup({\n label: `${label}.bindGroup${i}`,\n layout: bindGroupLayout,\n entries: groupEntries,\n });\n bindGroups.push(newBindGroup);\n }\n\n return {\n bindGroups,\n bindGroupLayout,\n };\n};\n\nexport type ShaderKeyInterface = {\n [K in T[number]]: number;\n};\n\ninterface AttribAcc {\n attributes: GPUVertexAttribute[];\n arrayStride: number;\n}\n\n/**\n * @param {GPUVertexFormat} vf - A valid GPUVertexFormat, representing a per-vertex value that can be passed to the vertex shader.\n * @returns {number} The number of bytes present in the value to be passed.\n */\nexport const convertVertexFormatToBytes = (vf: GPUVertexFormat): number => {\n const splitFormat = vf.split('x');\n const bytesPerElement = parseInt(splitFormat[0].replace(/[^0-9]/g, '')) / 8;\n\n const bytesPerVec =\n bytesPerElement *\n (splitFormat[1] !== undefined ? 
parseInt(splitFormat[1]) : 1);\n\n return bytesPerVec;\n};\n\n/** Creates a GPUVertexBuffer Layout that maps to an interleaved vertex buffer.\n * @param {GPUVertexFormat[]} vertexFormats - An array of valid GPUVertexFormats.\n * @returns {GPUVertexBufferLayout} A GPUVertexBufferLayout representing an interleaved vertex buffer.\n */\nexport const createVBuffer = (\n vertexFormats: GPUVertexFormat[]\n): GPUVertexBufferLayout => {\n const initialValue: AttribAcc = { attributes: [], arrayStride: 0 };\n\n const vertexBuffer = vertexFormats.reduce(\n (acc: AttribAcc, curr: GPUVertexFormat, idx: number) => {\n const newAttribute: GPUVertexAttribute = {\n shaderLocation: idx,\n offset: acc.arrayStride,\n format: curr,\n };\n const nextOffset: number =\n acc.arrayStride + convertVertexFormatToBytes(curr);\n\n const retVal: AttribAcc = {\n attributes: [...acc.attributes, newAttribute],\n arrayStride: nextOffset,\n };\n return retVal;\n },\n initialValue\n );\n\n const layout: GPUVertexBufferLayout = {\n arrayStride: vertexBuffer.arrayStride,\n attributes: vertexBuffer.attributes,\n };\n\n return layout;\n};\n\nexport const create3DRenderPipeline = (\n device: GPUDevice,\n label: string,\n bgLayouts: GPUBindGroupLayout[],\n vertexShader: string,\n vBufferFormats: GPUVertexFormat[],\n fragmentShader: string,\n presentationFormat: GPUTextureFormat,\n depthTest = false,\n topology: GPUPrimitiveTopology = 'triangle-list',\n cullMode: GPUCullMode = 'back'\n) => {\n const pipelineDescriptor: GPURenderPipelineDescriptor = {\n label: `${label}.pipeline`,\n layout: device.createPipelineLayout({\n label: `${label}.pipelineLayout`,\n bindGroupLayouts: bgLayouts,\n }),\n vertex: {\n module: device.createShaderModule({\n label: `${label}.vertexShader`,\n code: vertexShader,\n }),\n entryPoint: 'vertexMain',\n buffers:\n vBufferFormats.length !== 0 ? 
[createVBuffer(vBufferFormats)] : [],\n },\n fragment: {\n module: device.createShaderModule({\n label: `${label}.fragmentShader`,\n code: fragmentShader,\n }),\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: topology,\n cullMode: cullMode,\n },\n };\n if (depthTest) {\n pipelineDescriptor.depthStencil = {\n depthCompare: 'less',\n depthWriteEnabled: true,\n format: 'depth24plus',\n };\n }\n return device.createRenderPipeline(pipelineDescriptor);\n};\n\nexport const createTextureFromImage = (\n device: GPUDevice,\n bitmap: ImageBitmap\n) => {\n const texture: GPUTexture = device.createTexture({\n size: [bitmap.width, bitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: bitmap },\n { texture: texture },\n [bitmap.width, bitmap.height]\n );\n return texture;\n};\n"},7596:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/brickwall_diffuse.c9ee5359ababda94.png"},7669:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/brickwall_height.5e7f3bd0e5c45632.png"},4334:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/brickwall_normal.12f32d2510fd6264.png"},2146:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/spiral_height.0c894e7810776e93.png"},6465:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/spiral_normal.5cdc922342aadd02.png"},5784:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/toybox_height.826b323f99a3103b.png"},2283:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/toybox_normal.5758b42f35d39dd7.png"},3765:function(e,n,t){"use strict";e.exports=t.p+"static/assets/img/wood_diffuse.bfe4491cf7c50e45.png"}}]); \ No newline at end of file diff --git a/_next/static/chunks/118.79ec79606fecb947.js b/_next/static/chunks/118.79ec79606fecb947.js deleted file mode 100644 index b747e033..00000000 --- a/_next/static/chunks/118.79ec79606fecb947.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[118],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return c},hu:function(){return p}});var r=t(5893),a=t(9008),i=t.n(a),o=t(1163),s=t(7294),l=t(9147),u=t.n(l);t(7319);let d=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let i=t(4631);n=i(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),l=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),c=(0,s.useRef)(null),p=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,o.useRouter)(),h=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[g,f]=(0,s.useState)(null),[x,v]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?v(h[1]):v(a[0].name),d&&l.current)for(l.current.appendChild(d.domElement);d.__controllers.length>0;)d.__controllers[0].remove();p&&c.current&&(p.dom.style.position="absolute",p.showPanel(1),c.current.appendChild(p.dom));let t={active:!0},r=()=>{t.active=!1};try{let i=n.current;if(!i)throw Error("The canvas is not available");let o=e.init({canvas:i,pageState:t,gui:d,stats:p});o instanceof 
Promise&&o.catch(e=>{console.error(e),f(e)})}catch(s){console.error(s),f(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(i(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),g?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(g)})]}):null]}),(0,r.jsxs)("div",{className:u().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:c}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:l}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:u().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":x==e.name,onClick(){v(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:u().sourceFileContainer,"data-active":x==e.name},n))]})]})},c=e=>(0,r.jsx)(d,{...e});function p(e,n){if(!e)throw Error(n)}},7118:function(e,n,t){"use strict";t.r(n),t.d(n,{default:function(){return T}});var r,a,i=t(6416),o=t(5671),s="struct SpaceTransformUniforms {\n projMatrix: mat4x4f,\n viewMatrix: mat4x4f,\n modelMatrix: mat4x4f,\n}\n\nstruct Uniforms_MapInfo {\n mappingType: u32,\n lightPosX: f32,\n lightPosY: f32,\n lightPosZ: f32,\n lightIntensity: f32,\n depthScale: f32,\n depthLayers: f32,\n}\n\nstruct VertexInput {\n // Shader assumes the missing 4th float is 1.0\n @location(0) position : vec4f,\n @location(1) normal : vec3f,\n @location(2) uv : vec2f,\n @location(3) vert_tan: vec3f,\n @location(4) vert_bitan: vec3f,\n}\n\nstruct VertexOutput {\n @builtin(position) Position : vec4f,\n @location(0) normal: vec3f,\n @location(1) uv : vec2f,\n // Vertex position in world space\n @location(2) posWS: vec3f,\n // Vertex position in tangent space\n @location(3) posTS: vec3f,\n // View position in tangent space\n @location(4) viewTS: vec3f,\n // Extracted components of our tbn matrix\n @location(5) tbnTS0: vec3, \n @location(6) tbnTS1: vec3,\n @location(7) tbnTS2: vec3,\n}\n\n// Uniforms\n@group(0) @binding(0) var spaceTransform : SpaceTransformUniforms;\n@group(0) @binding(1) var mapInfo: Uniforms_MapInfo;\n\n// Texture info\n@group(1) @binding(0) var textureSampler: sampler;\n@group(1) @binding(1) var diffuseTexture: texture_2d;\n@group(1) @binding(2) var normalTexture: texture_2d;\n@group(1) @binding(3) var depthTexture: texture_2d;\n\nfn parallax_uv(\n uv: vec2f, \n viewDirTS: vec3f, \n depthSample: f32,\n depthScale: f32,\n) -> vec2f {\n if (mapInfo.mappingType == 4) {\n // Perturb uv coordinates based on depth and camera direction\n let p = viewDirTS.xy * (depthSample * depthScale) / viewDirTS.z;\n return uv - p;\n }\n // Break up depth space into layers\n let depthPerLayer = 1.0 / f32(mapInfo.depthLayers);\n // Start at lowest depth\n var currentDepth = 0.0;\n let delta_uv = viewDirTS.xy * 
depthScale / (viewDirTS.z * mapInfo.depthLayers);\n var prev_uv = uv;\n var cur_uv = uv;\n\n var depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n var prevDepthFromTexture = depthFromTexture;\n var prevCurrentDepth = currentDepth;\n for (var i: u32 = 0; i < 32; i++) {\n currentDepth += depthPerLayer;\n prev_uv = cur_uv;\n cur_uv -= delta_uv;\n depthFromTexture = textureSample(depthTexture, textureSampler, cur_uv).r;\n // Determine whether current depth is greater than depth map\n // Once we reach a certain threshold, we stop updating cur_uv\n cur_uv = select(cur_uv, prev_uv, depthFromTexture < currentDepth);\n prevDepthFromTexture = select(depthFromTexture, prevDepthFromTexture, prevDepthFromTexture < currentDepth);\n prevCurrentDepth = select(currentDepth, prevCurrentDepth, prevDepthFromTexture < currentDepth);\n }\n return cur_uv;\n}\n\nfn when_greater(v1: f32, v2: f32) -> f32 {\n return max(sign(v1 - v2), 0.0);\n}\n\n@vertex\nfn vertexMain(input: VertexInput) -> VertexOutput {\n var output : VertexOutput;\n // Create the Model to View Matrix\n let MV = spaceTransform.viewMatrix * spaceTransform.modelMatrix;\n // Create the Model to View to Projection Matrix\n let MVP = spaceTransform.projMatrix * MV;\n \n // Get Clip space transforms and pass through values out of the way\n output.Position = MVP * input.position;\n output.uv = input.uv;\n output.normal = input.normal;\n\n // Multiply pos by modelMatrix to get the vertex/fragment's position in world space\n output.posWS = vec3f((spaceTransform.modelMatrix * input.position).xyz);\n \n var MV3x3 = mat3x3f(\n MV[0].xyz,\n MV[1].xyz,\n MV[2].xyz\n );\n\n // Get unit vectors of normal, tangent, and bitangents in model space\n let vertexTangent = normalize(input.vert_tan);\n let vertexBitangent = normalize(input.vert_bitan);\n let vertexNormal = normalize(input.normal);\n\n // Convert tbn unit vectors to mv space for a model view tbn\n var tbnTS = transpose(\n MV3x3 * mat3x3f(\n vertexTangent,\n vertexBitangent,\n vertexNormal\n )\n );\n // Condense to vec3s so they can be passed to fragment shader\n output.tbnTS0 = tbnTS[0];\n output.tbnTS1 = tbnTS[1];\n output.tbnTS2 = tbnTS[2];\n\n // Get the tangent space position of the vertex\n output.posTS = tbnTS * (MV * input.position).xyz;\n // Get the tangent space position of the camera view\n output.viewTS = tbnTS * vec3f(0.0, 0.0, 0.0);\n\n return output;\n}\n\n@fragment\nfn fragmentMain(input: VertexOutput) -> @location(0) vec4f {\n // Reconstruct tbnTS\n let tbnTS = mat3x3f(\n input.tbnTS0,\n input.tbnTS1,\n input.tbnTS2,\n );\n\n // Get direction of view in tangent space\n let viewDirTS = normalize(input.viewTS - input.posTS);\n\n // Get position, direction, and distance of light in tangent space (no need to multiply by model matrix as there is no model)\n let lightPosVS = spaceTransform.viewMatrix * vec4f(mapInfo.lightPosX, mapInfo.lightPosY, mapInfo.lightPosZ, 1.0);\n let lightPosTS = tbnTS * lightPosVS.xyz;\n let lightDirTS = normalize(lightPosTS - input.posTS);\n let lightDistanceTS = distance(input.posTS, lightPosTS);\n\n let depthMap = textureSample(depthTexture, textureSampler, input.uv); \n\n let uv = select(\n parallax_uv(input.uv, viewDirTS, depthMap.r, mapInfo.depthScale),\n input.uv,\n mapInfo.mappingType < 4\n );\n\n // Get values from textures\n let diffuseMap = textureSample(diffuseTexture, textureSampler, uv);\n let normalMap = textureSample(normalTexture, textureSampler, uv);\n\n // Get normal in tangent space\n let normalTS = normalize((normalMap.xyz 
* 2.0) - 1.0);\n \n // Calculate diffusion lighting\n let lightColorIntensity = vec3f(255.0, 255.0, 255.0) * mapInfo.lightIntensity;\n //How similar is the normal to the lightDirection\n let diffuseStrength = clamp(\n dot(normalTS, lightDirTS), 0.0, 1.0\n );\n // Strenght inversely proportional to square of distance from light\n let diffuseLight = (lightColorIntensity * diffuseStrength) / (lightDistanceTS * lightDistanceTS);\n\n switch (mapInfo.mappingType) {\n // Output the diffuse texture\n case 0: {\n return vec4f(diffuseMap.rgb, 1.0);\n }\n // Output the normal map\n case 1: {\n return vec4f(normalMap.rgb, 1.0);\n }\n // Output the height map\n case 2: {\n return vec4f(depthMap.rgb, 1.0);\n }\n default: {\n return vec4f(diffuseMap.rgb * diffuseLight, 1.0);\n }\n }\n}";let l=function(e,n){let t=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=t?GPUBufferUsage.VERTEX|GPUBufferUsage.STORAGE:GPUBufferUsage.VERTEX,i=r?GPUBufferUsage.INDEX|GPUBufferUsage.STORAGE:GPUBufferUsage.INDEX,o=e.createBuffer({size:n.vertices.byteLength,usage:a,mappedAtCreation:!0});new Float32Array(o.getMappedRange()).set(n.vertices),o.unmap();let s=e.createBuffer({size:n.indices.byteLength,usage:i,mappedAtCreation:!0});return n.indices.byteLength===n.indices.length*Uint16Array.BYTES_PER_ELEMENT?new Uint16Array(s.getMappedRange()).set(n.indices):new Uint32Array(s.getMappedRange()).set(n.indices),s.unmap(),{vertexBuffer:o,indexBuffer:s,indexCount:n.indices.length}},u=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+0,3);return i.R3.fromValues(t[0],t[1],t[2])},d=(e,n)=>{let t=new Float32Array(e.vertices.buffer,n*e.vertexStride+6*Float32Array.BYTES_PER_ELEMENT,2);return i.K4.fromValues(t[0],t[1])},c=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1;r=Math.floor(r),a=Math.floor(a),o=Math.floor(o);let s=[],l=[],u=0,d=(e,n,t,r,a,o,d,c,p,m)=>{let h=o/p,g=d/m,f=o/2,x=d/2,v=c/2,b=p+1,y=m+1,S=0,P=i.R3.create(),T=i.R3.create();for(let w=0;w0?1:-1,l.push(...T),l.push(G/p),l.push(1-w/m),S+=1}}for(let U=0;U0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,o=arguments.length>6&&void 0!==arguments[6]?arguments[6]:"uint16",{vertices:s,indices:l}=c(e,n,t,r,a,i),u=8*Float32Array.BYTES_PER_ELEMENT,d="uint16"===o?new Uint16Array(l):new Uint32Array(l);return{vertices:new Float32Array(s),indices:d,vertexStride:u}},m=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1,t=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1,a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:1,o=arguments.length>5&&void 0!==arguments[5]?arguments[5]:1,s=p(e,n,t,r,a,o),l=s.vertexStride/Float32Array.BYTES_PER_ELEMENT,c=s.vertices.length/l,m=Array(c),h=Array(c),g=Array(c);for(let f=0;f{let s=[];for(let l=0;l{let 
n=e.split("x"),t=parseInt(n[0].replace(/[^0-9]/g,""))/8,r=t*(void 0!==n[1]?parseInt(n[1]):1);return r},f=e=>{let n=e.reduce((e,n,t)=>{let r={shaderLocation:t,offset:e.arrayStride,format:n},a=e.arrayStride+g(n),i={attributes:[...e.attributes,r],arrayStride:a};return i},{attributes:[],arrayStride:0}),t={arrayStride:n.arrayStride,attributes:n.attributes};return t},x=function(e,n,t,r,a,i,o){let s=arguments.length>7&&void 0!==arguments[7]&&arguments[7],l=arguments.length>8&&void 0!==arguments[8]?arguments[8]:"triangle-list",u=arguments.length>9&&void 0!==arguments[9]?arguments[9]:"back",d={label:"".concat(n,".pipeline"),layout:e.createPipelineLayout({label:"".concat(n,".pipelineLayout"),bindGroupLayouts:t}),vertex:{module:e.createShaderModule({label:"".concat(n,".vertexShader"),code:r}),entryPoint:"vertexMain",buffers:0!==a.length?[f(a)]:[]},fragment:{module:e.createShaderModule({label:"".concat(n,".fragmentShader"),code:i}),entryPoint:"fragmentMain",targets:[{format:o}]},primitive:{topology:l,cullMode:u}};return s&&(d.depthStencil={depthCompare:"less",depthWriteEnabled:!0,format:"depth24plus"}),e.createRenderPipeline(d)},v=(e,n)=>{let t=e.createTexture({size:[n.width,n.height,1],format:"rgba8unorm",usage:GPUTextureUsage.TEXTURE_BINDING|GPUTextureUsage.COPY_DST|GPUTextureUsage.RENDER_ATTACHMENT});return e.queue.copyExternalImageToTexture({source:n},{texture:t},[n.width,n.height]),t},b=async(e,n)=>{let t=n.map(e=>{let n=e.split("_"),t=n[n.length-1].split(".")[0];return{url:"/img/"+e,type:t}});console.log(t);let r={};for(let a=0;a{let n,t,r,{canvas:o,pageState:u,gui:d}=e,c=await navigator.gpu.requestAdapter(),p=await c.requestDevice();if(!u.active)return;let g=o.getContext("webgpu"),f=window.devicePixelRatio;o.width=o.clientWidth*f,o.height=o.clientHeight*f;let v=navigator.gpu.getPreferredCanvasFormat();g.configure({device:p,format:v,alphaMode:"premultiplied"});let y={"Bump Mode":"Normal Map",cameraPosX:0,cameraPosY:.8,cameraPosZ:-1.4,lightPosX:1.7,lightPosY:.7,lightPosZ:-1.9,lightIntensity:.02,depthScale:.05,depthLayers:16,Texture:"Spiral","Reset Light"(){}},S=p.createTexture({size:[o.width,o.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),P=p.createBuffer({size:256,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),T=p.createBuffer({size:7*Float32Array.BYTES_PER_ELEMENT,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST});{let w=await b(p,["wood_diffuse.png","spiral_normal.png","spiral_height.png"]);n=w}{let B=await b(p,["wood_diffuse.png","toybox_normal.png","toybox_height.png"]);t=B}{let G=await b(p,["brickwall_diffuse.png","brickwall_normal.png","brickwall_height.png"]);r=G}let M=p.createSampler({magFilter:"linear",minFilter:"linear"}),U={colorAttachments:[{view:void 
0,clearValue:{r:0,g:0,b:0,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:S.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},E=l(p,m(1,1,1)),R=h([0,1],[GPUShaderStage.VERTEX|GPUShaderStage.FRAGMENT,GPUShaderStage.FRAGMENT|GPUShaderStage.VERTEX],["buffer","buffer"],[{type:"uniform"},{type:"uniform"}],[[{buffer:P},{buffer:T}]],"Frame",p),V=h([0,1,2,3],[GPUShaderStage.FRAGMENT],["sampler","texture","texture","texture"],[{type:"filtering"},{sampleType:"float"},{sampleType:"float"},{sampleType:"float"}],[[M,n.diffuse.createView(),n.normal.createView(),n.height.createView()],[M,t.diffuse.createView(),t.normal.createView(),t.height.createView()],[M,r.diffuse.createView(),r.normal.createView(),r.height.createView()]],"Surface",p),A=o.width/o.height,_=i._E.perspective(2*Math.PI/5,A,.1,10),D=()=>{switch(y["Bump Mode"]){case"Diffuse Texture":return 0;case"Normal Texture":return 1;case"Depth Texture":return 2;case"Normal Map":return 3;case"Parallax Scale":return 4;case"Steep Parallax":return 5}},F=x(p,"NormalMappingRender",[R.bindGroupLayout,V.bindGroupLayout],s,["float32x3","float32x3","float32x2","float32x3","float32x3"],s,v,!0),L=0,C=()=>{L=a[y.Texture]};d.add(y,"Bump Mode",["Diffuse Texture","Normal Texture","Depth Texture","Normal Map","Parallax Scale","Steep Parallax"]),d.add(y,"Texture",["Spiral","Toybox","BrickWall"]).onChange(C);let I=d.addFolder("Light"),N=d.addFolder("Depth");I.add(y,"Reset Light").onChange(()=>{j.setValue(1.7),X.setValue(-.7),Y.setValue(1.9),O.setValue(.02)});let j=I.add(y,"lightPosX",-5,5).step(.1),X=I.add(y,"lightPosY",-5,5).step(.1),Y=I.add(y,"lightPosZ",-5,5).step(.1),O=I.add(y,"lightIntensity",0,.1).step(.002);N.add(y,"depthScale",0,.1).step(.01),N.add(y,"depthLayers",1,32).step(1),requestAnimationFrame(function e(){if(!u.active)return;let n=i._E.lookAt([y.cameraPosX,y.cameraPosY,y.cameraPosZ],[0,0,0],[0,1,0]),t=function(){let e=i._E.create();i._E.identity(e),i._E.rotateX(e,10,e);let n=Date.now()/1e3;return i._E.rotateY(e,-.5*n,e),e}(),r=new Float32Array([..._,...n,...t]),a=D();p.queue.writeBuffer(P,0,r.buffer,r.byteOffset,r.byteLength),p.queue.writeBuffer(T,0,new Uint32Array([a])),p.queue.writeBuffer(T,4,new Float32Array([y.lightPosX,y.lightPosY,y.lightPosZ,y.lightIntensity,y.depthScale,y.depthLayers])),U.colorAttachments[0].view=g.getCurrentTexture().createView();let o=p.createCommandEncoder(),s=o.beginRenderPass(U);s.setPipeline(F),s.setBindGroup(0,R.bindGroups[0]),s.setBindGroup(1,V.bindGroups[L]),s.setVertexBuffer(0,E.vertexBuffer),s.setIndexBuffer(E.indexBuffer,"uint16"),s.drawIndexed(E.indexCount),s.end(),p.queue.submit([o.finish()]),requestAnimationFrame(e)})},P=()=>(0,o.Tl)({name:"Normal Mapping",description:"This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. 
Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.",gui:!0,init:S,sources:[{name:y.substring(21),contents:"import { mat4 } from 'wgpu-matrix';\nimport { makeSample, SampleInit } from '../../components/SampleLayout';\nimport normalMapWGSL from './normalMap.wgsl';\nimport { createMeshRenderable } from '../../meshes/mesh';\nimport { createBoxMeshWithTangents } from '../../meshes/box';\nimport {\n PBRDescriptor,\n createPBRDescriptor,\n createBindGroupDescriptor,\n create3DRenderPipeline,\n} from './utils';\n\nconst MAT4X4_BYTES = 64;\nenum TextureAtlas {\n Spiral,\n Toybox,\n BrickWall,\n}\n\nconst init: SampleInit = async ({ canvas, pageState, gui }) => {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n if (!pageState.active) return;\n const context = canvas.getContext('webgpu') as GPUCanvasContext;\n const devicePixelRatio = window.devicePixelRatio;\n canvas.width = canvas.clientWidth * devicePixelRatio;\n canvas.height = canvas.clientHeight * devicePixelRatio;\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n interface GUISettings {\n 'Bump Mode':\n | 'Diffuse Texture'\n | 'Normal Texture'\n | 'Depth Texture'\n | 'Normal Map'\n | 'Parallax Scale'\n | 'Steep Parallax';\n cameraPosX: number;\n cameraPosY: number;\n cameraPosZ: number;\n lightPosX: number;\n lightPosY: number;\n lightPosZ: number;\n lightIntensity: number;\n depthScale: number;\n depthLayers: number;\n Texture: string;\n 'Reset Light': () => void;\n }\n\n const settings: GUISettings = {\n 'Bump Mode': 'Normal Map',\n cameraPosX: 0.0,\n cameraPosY: 0.8,\n cameraPosZ: -1.4,\n lightPosX: 1.7,\n lightPosY: 0.7,\n lightPosZ: -1.9,\n lightIntensity: 0.02,\n depthScale: 0.05,\n depthLayers: 16,\n Texture: 'Spiral',\n 'Reset Light': () => {\n return;\n },\n };\n\n // Create normal mapping resources and pipeline\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBuffer = device.createBuffer({\n // Buffer holding projection, view, and model matrices plus padding bytes\n size: MAT4X4_BYTES * 4,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const mapMethodBuffer = device.createBuffer({\n // Buffer holding mapping type, light uniforms, and depth uniforms\n size: Float32Array.BYTES_PER_ELEMENT * 7,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n // Create PBR info (diffuse, normal, and depth/height textures)\n let spiralPBR: Required;\n {\n const response = await createPBRDescriptor(device, [\n 'wood_diffuse.png',\n 'spiral_normal.png',\n 'spiral_height.png',\n ]);\n spiralPBR = response as Required;\n }\n\n let toyboxPBR: Required;\n {\n const response = await createPBRDescriptor(device, [\n 'wood_diffuse.png',\n 'toybox_normal.png',\n 'toybox_height.png',\n ]);\n toyboxPBR = response as Required;\n }\n\n let brickWallPBR: Required;\n {\n const response = await createPBRDescriptor(device, [\n 'brickwall_diffuse.png',\n 'brickwall_normal.png',\n 'brickwall_height.png',\n ]);\n brickWallPBR = response as Required;\n }\n\n // Create a sampler with linear filtering for smooth interpolation.\n const sampler = device.createSampler({\n magFilter: 'linear',\n minFilter: 'linear',\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: 
[\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const box = createMeshRenderable(\n device,\n createBoxMeshWithTangents(1.0, 1.0, 1.0)\n );\n\n // Uniform bindGroups and bindGroupLayout\n const frameBGDescriptor = createBindGroupDescriptor(\n [0, 1],\n [\n GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,\n GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,\n ],\n ['buffer', 'buffer'],\n [{ type: 'uniform' }, { type: 'uniform' }],\n [[{ buffer: uniformBuffer }, { buffer: mapMethodBuffer }]],\n 'Frame',\n device\n );\n\n // Texture bindGroups and bindGroupLayout\n const surfaceBGDescriptor = createBindGroupDescriptor(\n [0, 1, 2, 3],\n [GPUShaderStage.FRAGMENT],\n ['sampler', 'texture', 'texture', 'texture'],\n [\n { type: 'filtering' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n { sampleType: 'float' },\n ],\n // Multiple bindgroups that accord to the layout defined above\n [\n [\n sampler,\n spiralPBR.diffuse.createView(),\n spiralPBR.normal.createView(),\n spiralPBR.height.createView(),\n ],\n [\n sampler,\n toyboxPBR.diffuse.createView(),\n toyboxPBR.normal.createView(),\n toyboxPBR.height.createView(),\n ],\n [\n sampler,\n brickWallPBR.diffuse.createView(),\n brickWallPBR.normal.createView(),\n brickWallPBR.height.createView(),\n ],\n ],\n 'Surface',\n device\n );\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 0.1,\n 10.0\n ) as Float32Array;\n\n function getViewMatrix() {\n return mat4.lookAt(\n [settings.cameraPosX, settings.cameraPosY, settings.cameraPosZ],\n [0, 0, 0],\n [0, 1, 0]\n );\n }\n\n function getModelMatrix() {\n const modelMatrix = mat4.create();\n mat4.identity(modelMatrix);\n mat4.rotateX(modelMatrix, 10, modelMatrix);\n const now = Date.now() / 1000;\n mat4.rotateY(modelMatrix, now * -0.5, modelMatrix);\n return modelMatrix;\n }\n\n // Change the model mapping type\n const getMappingType = (): number => {\n switch (settings['Bump Mode']) {\n case 'Diffuse Texture':\n return 0;\n case 'Normal Texture':\n return 1;\n case 'Depth Texture':\n return 2;\n case 'Normal Map':\n return 3;\n case 'Parallax Scale':\n return 4;\n case 'Steep Parallax':\n return 5;\n }\n };\n\n const texturedCubePipeline = create3DRenderPipeline(\n device,\n 'NormalMappingRender',\n [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],\n normalMapWGSL,\n // Position, normal uv tangent bitangent\n ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],\n normalMapWGSL,\n presentationFormat,\n true\n );\n\n let currentSurfaceBindGroup = 0;\n const onChangeTexture = () => {\n currentSurfaceBindGroup = TextureAtlas[settings.Texture];\n };\n\n gui.add(settings, 'Bump Mode', [\n 'Diffuse Texture',\n 'Normal Texture',\n 'Depth Texture',\n 'Normal Map',\n 'Parallax Scale',\n 'Steep Parallax',\n ]);\n gui\n .add(settings, 'Texture', ['Spiral', 'Toybox', 'BrickWall'])\n .onChange(onChangeTexture);\n const lightFolder = gui.addFolder('Light');\n const depthFolder = gui.addFolder('Depth');\n lightFolder.add(settings, 'Reset Light').onChange(() => {\n lightPosXController.setValue(1.7);\n lightPosYController.setValue(-0.7);\n lightPosZController.setValue(1.9);\n lightIntensityController.setValue(0.02);\n });\n const lightPosXController = 
lightFolder\n .add(settings, 'lightPosX', -5, 5)\n .step(0.1);\n const lightPosYController = lightFolder\n .add(settings, 'lightPosY', -5, 5)\n .step(0.1);\n const lightPosZController = lightFolder\n .add(settings, 'lightPosZ', -5, 5)\n .step(0.1);\n const lightIntensityController = lightFolder\n .add(settings, 'lightIntensity', 0.0, 0.1)\n .step(0.002);\n depthFolder.add(settings, 'depthScale', 0.0, 0.1).step(0.01);\n depthFolder.add(settings, 'depthLayers', 1, 32).step(1);\n\n function frame() {\n if (!pageState.active) return;\n\n // Write to normal map shader\n const viewMatrix = getViewMatrix();\n\n const modelMatrix = getModelMatrix();\n\n const matrices = new Float32Array([\n ...projectionMatrix,\n ...viewMatrix,\n ...modelMatrix,\n ]);\n\n const mappingType = getMappingType();\n\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n matrices.buffer,\n matrices.byteOffset,\n matrices.byteLength\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 0,\n new Uint32Array([mappingType])\n );\n\n device.queue.writeBuffer(\n mapMethodBuffer,\n 4,\n new Float32Array([\n settings.lightPosX,\n settings.lightPosY,\n settings.lightPosZ,\n settings.lightIntensity,\n settings.depthScale,\n settings.depthLayers,\n ])\n );\n\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n // Draw textured Cube\n passEncoder.setPipeline(texturedCubePipeline);\n passEncoder.setBindGroup(0, frameBGDescriptor.bindGroups[0]);\n passEncoder.setBindGroup(\n 1,\n surfaceBGDescriptor.bindGroups[currentSurfaceBindGroup]\n );\n passEncoder.setVertexBuffer(0, box.vertexBuffer);\n passEncoder.setIndexBuffer(box.indexBuffer, 'uint16');\n passEncoder.drawIndexed(box.indexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n requestAnimationFrame(frame);\n};\n\nconst NormalMapping: () => JSX.Element = () =>\n makeSample({\n name: 'Normal Mapping',\n description:\n 'This example demonstrates multiple different methods that employ fragment shaders to achieve additional perceptual depth on the surface of a cube mesh. 
Demonstrated methods include normal mapping, parallax mapping, and steep parallax mapping.',\n gui: true,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './normalMap.wgsl',\n contents: normalMapWGSL,\n editable: true,\n },\n {\n name: '../../meshes/box.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/box.ts').default,\n },\n {\n name: '../../meshes/mesh.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/mesh.ts').default,\n },\n {\n name: './utils.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./utils.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default NormalMapping;\n"},{name:"./normalMap.wgsl",contents:s,editable:!0},{name:"../../meshes/box.ts",contents:t(3583).Z},{name:"../../meshes/mesh.ts",contents:t(3150).Z},{name:"./utils.ts",contents:t(1146).Z}],filename:y});var T=P},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},3583:function(e,n){"use strict";n.Z="import { vec3 } from 'wgpu-matrix';\nimport { getMeshPosAtIndex, getMeshUVAtIndex, Mesh } from './mesh';\n\nexport interface BoxMesh extends Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n//// Borrowed and simplified from https://github.com/mrdoob/three.js/blob/master/src/geometries/BoxGeometry.js\n//// Presumes vertex buffer alignment of verts, normals, and uvs\nconst createBoxGeometry = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n) => {\n widthSegments = Math.floor(widthSegments);\n heightSegments = Math.floor(heightSegments);\n depthSegments = Math.floor(depthSegments);\n\n const indices = [];\n const vertNormalUVBuffer = [];\n\n let numVertices = 0;\n\n const buildPlane = (\n u: 0 | 1 | 2,\n v: 0 | 1 | 2,\n w: 0 | 1 | 2,\n udir: -1 | 1,\n vdir: -1 | 1,\n planeWidth: number,\n planeHeight: number,\n planeDepth: number,\n xSections: number,\n ySections: number\n ) => {\n const segmentWidth = planeWidth / xSections;\n const segmentHeight = planeHeight / ySections;\n\n const widthHalf = planeWidth / 2;\n const heightHalf = planeHeight / 2;\n const depthHalf = planeDepth / 2;\n\n const gridX1 = xSections + 1;\n const gridY1 = ySections + 1;\n\n let vertexCounter = 0;\n\n const vertex = vec3.create();\n const normal = vec3.create();\n for (let iy = 0; iy < gridY1; iy++) {\n const y = iy * segmentHeight - heightHalf;\n\n for (let ix = 0; ix < gridX1; ix++) {\n const x = ix * segmentWidth - widthHalf;\n\n //Calculate plane vertices\n vertex[u] = x * udir;\n vertex[v] = y * vdir;\n vertex[w] = depthHalf;\n vertNormalUVBuffer.push(...vertex);\n\n //Caclulate normal\n normal[u] = 0;\n normal[v] = 0;\n normal[w] = planeDepth > 0 ? 
1.0 : -1.0;\n vertNormalUVBuffer.push(...normal);\n\n //Calculate uvs\n vertNormalUVBuffer.push(ix / xSections);\n vertNormalUVBuffer.push(1 - iy / ySections);\n\n vertexCounter += 1;\n }\n }\n\n for (let iy = 0; iy < ySections; iy++) {\n for (let ix = 0; ix < xSections; ix++) {\n const a = numVertices + ix + gridX1 * iy;\n const b = numVertices + ix + gridX1 * (iy + 1);\n const c = numVertices + (ix + 1) + gridX1 * (iy + 1);\n const d = numVertices + (ix + 1) + gridX1 * iy;\n\n //Push vertex indices\n //6 indices for each face\n indices.push(a, b, d);\n indices.push(b, c, d);\n\n numVertices += vertexCounter;\n }\n }\n };\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n -1,\n -1,\n depth,\n height,\n width,\n depthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 2, //z\n 1, //y\n 0, //x\n 1,\n -1,\n depth,\n height,\n -width,\n depthSegments,\n heightSegments\n );\n\n //Bottom face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n 1,\n width,\n depth,\n height,\n widthSegments,\n depthSegments\n );\n\n //Top face\n buildPlane(\n 0, //x\n 2, //z\n 1, //y\n 1,\n -1,\n width,\n depth,\n -height,\n widthSegments,\n depthSegments\n );\n\n //Side faces\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n 1,\n -1,\n width,\n height,\n depth,\n widthSegments,\n heightSegments\n );\n\n //Side face\n buildPlane(\n 0, //x\n 1, //y\n 2, //z\n -1,\n -1,\n width,\n height,\n -depth,\n widthSegments,\n heightSegments\n );\n\n return {\n vertices: vertNormalUVBuffer,\n indices: indices,\n };\n};\n\ntype IndexFormat = 'uint16' | 'uint32';\n\n// Box mesh code ported from threejs, with addition of indexFormat specifier for vertex pulling\nexport const createBoxMesh = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0,\n indexFormat: IndexFormat = 'uint16'\n): Mesh => {\n const { vertices, indices } = createBoxGeometry(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const vertexStride = 8 * Float32Array.BYTES_PER_ELEMENT; //calculateVertexStride(vertexProperties);\n\n const indicesArray =\n indexFormat === 'uint16'\n ? 
new Uint16Array(indices)\n : new Uint32Array(indices);\n\n return {\n vertices: new Float32Array(vertices),\n indices: indicesArray,\n vertexStride: vertexStride,\n };\n};\n\nexport const createBoxMeshWithTangents = (\n width = 1.0,\n height = 1.0,\n depth = 1.0,\n widthSegments = 1.0,\n heightSegments = 1.0,\n depthSegments = 1.0\n): Mesh => {\n const mesh = createBoxMesh(\n width,\n height,\n depth,\n widthSegments,\n heightSegments,\n depthSegments\n );\n\n const originalStrideElements =\n mesh.vertexStride / Float32Array.BYTES_PER_ELEMENT;\n\n const vertexCount = mesh.vertices.length / originalStrideElements;\n\n const tangents = new Array(vertexCount);\n const bitangents = new Array(vertexCount);\n const counts = new Array(vertexCount);\n for (let i = 0; i < vertexCount; i++) {\n tangents[i] = [0, 0, 0];\n bitangents[i] = [0, 0, 0];\n counts[i] = 0;\n }\n\n for (let i = 0; i < mesh.indices.length; i += 3) {\n const [idx1, idx2, idx3] = [\n mesh.indices[i],\n mesh.indices[i + 1],\n mesh.indices[i + 2],\n ];\n\n const [pos1, pos2, pos3] = [\n getMeshPosAtIndex(mesh, idx1),\n getMeshPosAtIndex(mesh, idx2),\n getMeshPosAtIndex(mesh, idx3),\n ];\n\n const [uv1, uv2, uv3] = [\n getMeshUVAtIndex(mesh, idx1),\n getMeshUVAtIndex(mesh, idx2),\n getMeshUVAtIndex(mesh, idx3),\n ];\n\n const edge1 = vec3.sub(pos2, pos1);\n const edge2 = vec3.sub(pos3, pos1);\n const deltaUV1 = vec3.sub(uv2, uv1);\n const deltaUV2 = vec3.sub(uv3, uv1);\n\n // Edge of a triangle moves in both u and v direction (2d)\n // deltaU * tangent vector + deltav * bitangent\n // Manipulating the data into matrices, we get an equation\n\n const constantVal =\n 1.0 / (deltaUV1[0] * deltaUV2[1] - deltaUV1[1] * deltaUV2[0]);\n\n const tangent = [\n constantVal * (deltaUV2[1] * edge1[0] - deltaUV1[1] * edge2[0]),\n constantVal * (deltaUV2[1] * edge1[1] - deltaUV1[1] * edge2[1]),\n constantVal * (deltaUV2[1] * edge1[2] - deltaUV1[1] * edge2[2]),\n ];\n\n const bitangent = [\n constantVal * (-deltaUV2[0] * edge1[0] + deltaUV1[0] * edge2[0]),\n constantVal * (-deltaUV2[0] * edge1[1] + deltaUV1[0] * edge2[1]),\n constantVal * (-deltaUV2[0] * edge1[2] + deltaUV1[0] * edge2[2]),\n ];\n\n //Accumulate tangents and bitangents\n tangents[idx1] = vec3.add(tangents[idx1], tangent);\n bitangents[idx1] = vec3.add(bitangents[idx1], bitangent);\n tangents[idx2] = vec3.add(tangents[idx2], tangent);\n bitangents[idx2] = vec3.add(bitangents[idx2], bitangent);\n tangents[idx3] = vec3.add(tangents[idx3], tangent);\n bitangents[idx3] = vec3.add(bitangents[idx3], bitangent);\n\n //Increment index count\n counts[idx1]++;\n counts[idx2]++;\n counts[idx3]++;\n }\n\n for (let i = 0; i < tangents.length; i++) {\n tangents[i] = vec3.divScalar(tangents[i], counts[i]);\n bitangents[i] = vec3.divScalar(bitangents[i], counts[i]);\n }\n\n const newStrideElements = 14;\n const wTangentArray = new Float32Array(vertexCount * newStrideElements);\n\n for (let i = 0; i < vertexCount; i++) {\n //Copy original vertex data (pos, normal uv)\n wTangentArray.set(\n //Get the original vertex [8 elements] (3 ele pos, 3 ele normal, 2 ele uv)\n mesh.vertices.subarray(\n i * originalStrideElements,\n (i + 1) * originalStrideElements\n ),\n //And put it at the proper location in the new array [14 bytes = 8 og + 6 empty]\n i * newStrideElements\n );\n //For each vertex, place tangent after originalStride\n wTangentArray.set(\n tangents[i],\n i * newStrideElements + originalStrideElements\n );\n //Place bitangent after 3 elements of tangent\n wTangentArray.set(\n bitangents[i],\n i 
* newStrideElements + originalStrideElements + 3\n );\n }\n\n return {\n vertices: wTangentArray,\n indices: mesh.indices,\n vertexStride: mesh.vertexStride + Float32Array.BYTES_PER_ELEMENT * 3 * 2,\n };\n};\n"},3150:function(e,n){"use strict";n.Z="import { vec3, vec2 } from 'wgpu-matrix';\n\n// Defines what to pass to pipeline to render mesh\nexport interface Renderable {\n vertexBuffer: GPUBuffer;\n indexBuffer: GPUBuffer;\n indexCount: number;\n bindGroup?: GPUBindGroup;\n}\n\nexport interface Mesh {\n vertices: Float32Array;\n indices: Uint16Array | Uint32Array;\n vertexStride: number;\n}\n\n/**\n * @param {GPUDevice} device - A valid GPUDevice.\n * @param {Mesh} mesh - An indexed triangle-list mesh, containing its vertices, indices, and vertexStride (number of elements per vertex).\n * @param {boolean} storeVertices - A boolean flag indicating whether the vertexBuffer should be available to use as a storage buffer.\n * @returns {boolean} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createMeshRenderable = (\n device: GPUDevice,\n mesh: Mesh,\n storeVertices = false,\n storeIndices = false\n): Renderable => {\n // Define buffer usage\n const vertexBufferUsage = storeVertices\n ? GPUBufferUsage.VERTEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.VERTEX;\n const indexBufferUsage = storeIndices\n ? GPUBufferUsage.INDEX | GPUBufferUsage.STORAGE\n : GPUBufferUsage.INDEX;\n\n // Create vertex and index buffers\n const vertexBuffer = device.createBuffer({\n size: mesh.vertices.byteLength,\n usage: vertexBufferUsage,\n mappedAtCreation: true,\n });\n new Float32Array(vertexBuffer.getMappedRange()).set(mesh.vertices);\n vertexBuffer.unmap();\n\n const indexBuffer = device.createBuffer({\n size: mesh.indices.byteLength,\n usage: indexBufferUsage,\n mappedAtCreation: true,\n });\n\n // Determine whether index buffer is indices are in uint16 or uint32 format\n if (\n mesh.indices.byteLength ===\n mesh.indices.length * Uint16Array.BYTES_PER_ELEMENT\n ) {\n new Uint16Array(indexBuffer.getMappedRange()).set(mesh.indices);\n } else {\n new Uint32Array(indexBuffer.getMappedRange()).set(mesh.indices);\n }\n\n indexBuffer.unmap();\n\n return {\n vertexBuffer,\n indexBuffer,\n indexCount: mesh.indices.length,\n };\n};\n\nexport const getMeshPosAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 0,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshNormalAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 3 * Float32Array.BYTES_PER_ELEMENT,\n 3\n );\n return vec3.fromValues(arr[0], arr[1], arr[2]);\n};\n\nexport const getMeshUVAtIndex = (mesh: Mesh, index: number) => {\n const arr = new Float32Array(\n mesh.vertices.buffer,\n index * mesh.vertexStride + 6 * Float32Array.BYTES_PER_ELEMENT,\n 2\n );\n return vec2.fromValues(arr[0], arr[1]);\n};\n"},1146:function(e,n){"use strict";n.Z="type BindGroupBindingLayout =\n | GPUBufferBindingLayout\n | GPUTextureBindingLayout\n | GPUSamplerBindingLayout\n | GPUStorageTextureBindingLayout\n | GPUExternalTextureBindingLayout;\n\nexport type BindGroupsObjectsAndLayout = {\n bindGroups: GPUBindGroup[];\n bindGroupLayout: GPUBindGroupLayout;\n};\n\ntype ResourceTypeName =\n | 'buffer'\n | 'texture'\n | 'sampler'\n | 'externalTexture'\n | 'storageTexture';\n\n/**\n * @param {number[]} bindings - The binding value of each resource 
in the bind group.\n * @param {number[]} visibilities - The GPUShaderStage visibility of the resource at the corresponding index.\n * @param {ResourceTypeName[]} resourceTypes - The resourceType at the corresponding index.\n * @returns {BindGroupsObjectsAndLayout} An object containing an array of bindGroups and the bindGroupLayout they implement.\n */\nexport const createBindGroupDescriptor = (\n bindings: number[],\n visibilities: number[],\n resourceTypes: ResourceTypeName[],\n resourceLayouts: BindGroupBindingLayout[],\n resources: GPUBindingResource[][],\n label: string,\n device: GPUDevice\n): BindGroupsObjectsAndLayout => {\n // Create layout of each entry within a bindGroup\n const layoutEntries: GPUBindGroupLayoutEntry[] = [];\n for (let i = 0; i < bindings.length; i++) {\n layoutEntries.push({\n binding: bindings[i],\n visibility: visibilities[i % visibilities.length],\n [resourceTypes[i]]: resourceLayouts[i],\n });\n }\n\n // Apply entry layouts to bindGroupLayout\n const bindGroupLayout = device.createBindGroupLayout({\n label: `${label}.bindGroupLayout`,\n entries: layoutEntries,\n });\n\n // Create bindGroups that conform to the layout\n const bindGroups: GPUBindGroup[] = [];\n for (let i = 0; i < resources.length; i++) {\n const groupEntries: GPUBindGroupEntry[] = [];\n for (let j = 0; j < resources[0].length; j++) {\n groupEntries.push({\n binding: j,\n resource: resources[i][j],\n });\n }\n const newBindGroup = device.createBindGroup({\n label: `${label}.bindGroup${i}`,\n layout: bindGroupLayout,\n entries: groupEntries,\n });\n bindGroups.push(newBindGroup);\n }\n\n return {\n bindGroups,\n bindGroupLayout,\n };\n};\n\nexport type ShaderKeyInterface = {\n [K in T[number]]: number;\n};\n\ninterface AttribAcc {\n attributes: GPUVertexAttribute[];\n arrayStride: number;\n}\n\n/**\n * @param {GPUVertexFormat} vf - A valid GPUVertexFormat, representing a per-vertex value that can be passed to the vertex shader.\n * @returns {number} The number of bytes present in the value to be passed.\n */\nexport const convertVertexFormatToBytes = (vf: GPUVertexFormat): number => {\n const splitFormat = vf.split('x');\n const bytesPerElement = parseInt(splitFormat[0].replace(/[^0-9]/g, '')) / 8;\n\n const bytesPerVec =\n bytesPerElement *\n (splitFormat[1] !== undefined ? 
parseInt(splitFormat[1]) : 1);\n\n return bytesPerVec;\n};\n\n/** Creates a GPUVertexBuffer Layout that maps to an interleaved vertex buffer.\n * @param {GPUVertexFormat[]} vertexFormats - An array of valid GPUVertexFormats.\n * @returns {GPUVertexBufferLayout} A GPUVertexBufferLayout representing an interleaved vertex buffer.\n */\nexport const createVBuffer = (\n vertexFormats: GPUVertexFormat[]\n): GPUVertexBufferLayout => {\n const initialValue: AttribAcc = { attributes: [], arrayStride: 0 };\n\n const vertexBuffer = vertexFormats.reduce(\n (acc: AttribAcc, curr: GPUVertexFormat, idx: number) => {\n const newAttribute: GPUVertexAttribute = {\n shaderLocation: idx,\n offset: acc.arrayStride,\n format: curr,\n };\n const nextOffset: number =\n acc.arrayStride + convertVertexFormatToBytes(curr);\n\n const retVal: AttribAcc = {\n attributes: [...acc.attributes, newAttribute],\n arrayStride: nextOffset,\n };\n return retVal;\n },\n initialValue\n );\n\n const layout: GPUVertexBufferLayout = {\n arrayStride: vertexBuffer.arrayStride,\n attributes: vertexBuffer.attributes,\n };\n\n return layout;\n};\n\nexport const create3DRenderPipeline = (\n device: GPUDevice,\n label: string,\n bgLayouts: GPUBindGroupLayout[],\n vertexShader: string,\n vBufferFormats: GPUVertexFormat[],\n fragmentShader: string,\n presentationFormat: GPUTextureFormat,\n depthTest = false,\n topology: GPUPrimitiveTopology = 'triangle-list',\n cullMode: GPUCullMode = 'back'\n) => {\n const pipelineDescriptor: GPURenderPipelineDescriptor = {\n label: `${label}.pipeline`,\n layout: device.createPipelineLayout({\n label: `${label}.pipelineLayout`,\n bindGroupLayouts: bgLayouts,\n }),\n vertex: {\n module: device.createShaderModule({\n label: `${label}.vertexShader`,\n code: vertexShader,\n }),\n entryPoint: 'vertexMain',\n buffers:\n vBufferFormats.length !== 0 ? 
[createVBuffer(vBufferFormats)] : [],\n },\n fragment: {\n module: device.createShaderModule({\n label: `${label}.fragmentShader`,\n code: fragmentShader,\n }),\n entryPoint: 'fragmentMain',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: topology,\n cullMode: cullMode,\n },\n };\n if (depthTest) {\n pipelineDescriptor.depthStencil = {\n depthCompare: 'less',\n depthWriteEnabled: true,\n format: 'depth24plus',\n };\n }\n return device.createRenderPipeline(pipelineDescriptor);\n};\n\nexport const createTextureFromImage = (\n device: GPUDevice,\n bitmap: ImageBitmap\n) => {\n const texture: GPUTexture = device.createTexture({\n size: [bitmap.width, bitmap.height, 1],\n format: 'rgba8unorm',\n usage:\n GPUTextureUsage.TEXTURE_BINDING |\n GPUTextureUsage.COPY_DST |\n GPUTextureUsage.RENDER_ATTACHMENT,\n });\n device.queue.copyExternalImageToTexture(\n { source: bitmap },\n { texture: texture },\n [bitmap.width, bitmap.height]\n );\n return texture;\n};\n\nexport interface PBRDescriptor {\n diffuse?: GPUTexture;\n normal?: GPUTexture;\n height?: GPUTexture;\n}\n\ninterface URLLoad {\n url: string;\n type: keyof PBRDescriptor;\n}\n\nexport const createPBRDescriptor = async (\n device: GPUDevice,\n urls: string[]\n): Promise => {\n const imgAssetPrepend = '/img/';\n const loads = urls.map((url) => {\n const splits = url.split('_');\n const ttype = splits[splits.length - 1].split('.')[0];\n const load: URLLoad = {\n url: imgAssetPrepend + url,\n type: ttype as keyof PBRDescriptor,\n };\n return load;\n });\n console.log(loads);\n const pbr: PBRDescriptor = {};\n for (let i = 0; i < loads.length; i++) {\n console.log(loads[i].url);\n console.log(import.meta.url);\n let texture: GPUTexture;\n {\n const response = await fetch(loads[i].url);\n const imageBitmap = await createImageBitmap(await response.blob());\n texture = createTextureFromImage(device, imageBitmap);\n }\n\n console.log(loads[i].type);\n\n switch (loads[i].type) {\n case 'diffuse':\n {\n pbr.diffuse = texture;\n }\n break;\n case 'height':\n {\n pbr.height = texture;\n }\n break;\n case 'normal':\n {\n pbr.normal = texture;\n }\n break;\n }\n }\n return pbr;\n};\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/webpack-719b96d63423102a.js b/_next/static/chunks/webpack-07156a97204665ff.js similarity index 98% rename from _next/static/chunks/webpack-719b96d63423102a.js rename to _next/static/chunks/webpack-07156a97204665ff.js index 36273b46..896cb4f0 100644 --- a/_next/static/chunks/webpack-719b96d63423102a.js +++ b/_next/static/chunks/webpack-07156a97204665ff.js @@ -1 +1 @@ -!function(){"use strict";var e,t,r,n,f,a,o,c,i,u,b={},d={};function l(e){var t=d[e];if(void 0!==t)return t.exports;var r=d[e]={exports:{}},n=!0;try{b[e].call(r.exports,r,r.exports,l),n=!1}finally{n&&delete d[e]}return r.exports}l.m=b,e=[],l.O=function(t,r,n,f){if(r){f=f||0;for(var a=e.length;a>0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var o=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,f0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var o=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,fWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index 0f84d1bb..8dbdfd3d 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer - WebGPU Samples
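As a usage sketch of the createBindGroupDescriptor helper from the utils.ts source embedded in the chunk diffs above: the call shape below is the 'Frame' bind group the normal-mapping sample itself constructs; device, uniformBuffer, and mapMethodBuffer are assumed to already be in scope.

// One layout plus one bind group wrapping two uniform buffers.
// Returns { bindGroups, bindGroupLayout } per the embedded source.
const frameBGDescriptor = createBindGroupDescriptor(
  [0, 1],                                     // @binding indices
  [
    GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
    GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
  ],                                          // visibility per binding
  ['buffer', 'buffer'],                       // resource type per binding
  [{ type: 'uniform' }, { type: 'uniform' }], // layout entry per binding
  [[{ buffer: uniformBuffer }, { buffer: mapMethodBuffer }]], // one group
  'Frame',
  device
);
// frameBGDescriptor.bindGroupLayout feeds createPipelineLayout;
// frameBGDescriptor.bindGroups[0] is what passEncoder.setBindGroup(0, ...) takes.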

A-Buffer
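Likewise, the convertVertexFormatToBytes and createVBuffer helpers from that file can be checked by hand. A small worked sketch, assuming both are imported from the embedded utils.ts:

// 'float32x3' parses to 32 bits / 8 = 4 bytes per element, times 3 = 12.
const f3 = convertVertexFormatToBytes('float32x3'); // 12
const f2 = convertVertexFormatToBytes('float32x2'); // 8

// The normal-mapping sample interleaves position, normal, uv, tangent,
// and bitangent; createVBuffer folds the formats into attribute offsets
// 0, 12, 24, 32, 44 and a 56-byte arrayStride (14 floats per vertex),
// matching the 14-element stride built in createBoxMeshWithTangents.
const layout = createVBuffer([
  'float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3',
]);
console.log(layout.arrayStride); // 56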

See it on Github!
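The pipeline side is then a single call to create3DRenderPipeline, whose signature also appears in the embedded file. A sketch of the call the normal-mapping sample makes; normalMapWGSL, the two bind group layouts, and presentationFormat are assumed to be in scope:

const texturedCubePipeline = create3DRenderPipeline(
  device,
  'NormalMappingRender',   // label prefix for the created GPU objects
  [frameBGDescriptor.bindGroupLayout, surfaceBGDescriptor.bindGroupLayout],
  normalMapWGSL,           // vertex stage, entry point 'vertexMain'
  ['float32x3', 'float32x3', 'float32x2', 'float32x3', 'float32x3'],
  normalMapWGSL,           // fragment stage, entry point 'fragmentMain'
  presentationFormat,
  true                     // depthTest: adds a 'depth24plus' depthStencil state
);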

Demonstrates order independent transparency using a per-pixel
+ limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).
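One subtle detail in that sample is how the 28-byte Uniforms_MapInfo uniform buffer mixes an integer field with float fields: two writeBuffer calls at different byte offsets. A sketch of the packing, with device, mappingType, and settings assumed to be in scope:

// 7 * 4 = 28 bytes: { mappingType: u32, lightPosX/Y/Z, lightIntensity,
// depthScale, depthLayers: f32 }, mirroring Uniforms_MapInfo in the WGSL.
const mapMethodBuffer = device.createBuffer({
  size: Float32Array.BYTES_PER_ELEMENT * 7,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// The u32 occupies byte offset 0...
device.queue.writeBuffer(mapMethodBuffer, 0, new Uint32Array([mappingType]));
// ...and the six f32 fields follow from byte offset 4.
device.queue.writeBuffer(
  mapMethodBuffer,
  4,
  new Float32Array([
    settings.lightPosX,
    settings.lightPosY,
    settings.lightPosZ,
    settings.lightIntensity,
    settings.depthScale,
    settings.depthLayers,
  ])
);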

\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index fe9e2eed..227e1cee 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index 116b7866..a0638f24 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each invocation of the bitonic sort shader dispatches a workgroup containing elements/2 threads. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each invocation of the bitonic sort shader dispatches a workgroup containing elements/2 threads. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.
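The elements/2 arithmetic in the description above can be made concrete. A hypothetical sketch; the names and sizes are illustrative, not taken from the sample source:

// One compare-and-swap step of the bitonic network uses n/2 invocations,
// one per element pair. 512 elements -> a 256-thread workgroup, which
// stays within the default maxComputeInvocationsPerWorkgroup limit.
const elements = 512;                // must be a power of two
const workgroupSize = elements / 2;  // 256 invocations
// computePass is assumed to be a GPUComputePassEncoder with the sort
// pipeline and bind group already set; the shader is assumed to declare
// @workgroup_size(256) to match workgroupSize.
computePass.dispatchWorkgroups(1);   // one workgroup of elements/2 threads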

\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index cb55e44c..fdc0f3bd 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index 2cea3f47..80552bf5 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index e9164fd3..c5a3b278 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index f3bf0586..808886e7 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index ce1842b6..5814135b 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu. + "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu. Render geometry info to multiple targets in the gBuffers in the first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity.
@@ -24,4 +24,4 @@
We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen.
-
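The two-gBuffer layout described above maps to a render pass with two color attachments plus depth. A hypothetical sketch; the views and texture formats are assumptions, not read from the sample source:

const gBufferPassDescriptor: GPURenderPassDescriptor = {
  colorAttachments: [
    {
      view: gBufferNormalView, // e.g. an 'rgba16float' target (assumed)
      clearValue: { r: 0, g: 0, b: 0, a: 1 },
      loadOp: 'clear',
      storeOp: 'store',
    },
    {
      view: gBufferAlbedoView, // e.g. a 'bgra8unorm' target (assumed)
      clearValue: { r: 0, g: 0, b: 0, a: 1 },
      loadOp: 'clear',
      storeOp: 'store',
    },
  ],
  depthStencilAttachment: {
    view: depthTexture.createView(),
    depthClearValue: 1.0,
    depthLoadOp: 'clear',
    depthStoreOp: 'store',
  },
};
// A second, full-screen pass then reads these targets to light each
// fragment, independent of scene complexity.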

\ No newline at end of file +

\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index c923fa15..fb5b2ed0 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index 5e408632..762060a1 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index 65a05e41..a07c3904 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 848dba96..823bf106 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index 2b00325e..d55147e3 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 17f7804f..d0d34bdd 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index bc061e06..36188ee8 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index b6ebea33..7d4c30c8 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 84a9a919..bda7bc35 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of
+ of instancing to reduce draw overhead.)"/>

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene in order to demonstrate the reduction in JavaScript time spent to issue render commands. (Typically a scene like this would make use
- of instancing to reduce draw overhead.)

\ No newline at end of file
+ of instancing to reduce draw overhead.)
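To make the saving concrete, here is a minimal TypeScript sketch of the render-bundle pattern, assuming `device`, `context`, `pipeline`, `bindGroup`, `vertexBuffer`, and `vertexCount` already exist; it illustrates the API, and is not the sample's own code.

```ts
// Record the draw calls once, up front, into a reusable bundle.
const bundleEncoder = device.createRenderBundleEncoder({
  colorFormats: ['bgra8unorm'], // must match the render pass attachments
});
bundleEncoder.setPipeline(pipeline);
bundleEncoder.setBindGroup(0, bindGroup);
bundleEncoder.setVertexBuffer(0, vertexBuffer);
bundleEncoder.draw(vertexCount);
const bundle = bundleEncoder.finish();

function frame() {
  const encoder = device.createCommandEncoder();
  const pass = encoder.beginRenderPass({
    colorAttachments: [{
      view: context.getCurrentTexture().createView(),
      loadOp: 'clear',
      storeOp: 'store',
    }],
  });
  // Replaying the bundle is a single JS call per frame, however many
  // draws it contains; that is the overhead reduction being measured.
  pass.executeBundles([bundle]);
  pass.end();
  device.queue.submit([encoder.finish()]);
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
```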

\ No newline at end of file
diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html
index 02c4e711..a7603c9a 100644
--- a/samples/resizeCanvas.html
+++ b/samples/resizeCanvas.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Resize Canvas - WebGPU Samples
\ No newline at end of file
+ Resize Canvas - WebGPU Samples
\ No newline at end of file
diff --git a/samples/reversedZ.html b/samples/reversedZ.html
index 6aafe325..58faaab8 100644
--- a/samples/reversedZ.html
+++ b/samples/reversedZ.html
@@ -17,7 +17,7 @@
Related reading:
https://developer.nvidia.com/content/depth-precision-visualized
https://thxforthefish.com/posts/reverse_z/
- "/>

Reversed Z

See it on Github!

This example shows the use of the reversed-z technique for better utilization of depth-buffer precision.
+ "/>

Reversed Z

See it on Github!

This example shows the use of the reversed-z technique for better utilization of depth-buffer precision.
The left column uses the regular method, while the right one uses the reversed-z technique. Both use depth32float as their depth-buffer format.
A set of red and green planes are positioned very close to each other. Higher sets are placed further from the camera (and are scaled up to remain visible).
@@ -26,4 +26,4 @@
Related reading:
https://developer.nvidia.com/content/depth-precision-visualized
https://thxforthefish.com/posts/reverse_z/
-
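The reversed-z setup boils down to three coordinated changes: a projection matrix that maps the far plane to depth 0, a 'greater' depth test, and clearing depth to 0.0. The TypeScript sketch below illustrates the latter two under assumed names (`device`, `shaderModule`, `context`, `depthTexture`); it is not the sample's actual code.

```ts
// Reversed z: keep the *larger* depth value instead of the smaller one.
const pipeline = device.createRenderPipeline({
  layout: 'auto',
  vertex: { module: shaderModule, entryPoint: 'vertexMain' },
  fragment: {
    module: shaderModule,
    entryPoint: 'fragmentMain',
    targets: [{ format: 'bgra8unorm' }],
  },
  depthStencil: {
    format: 'depth32float',  // the depth format used by both columns
    depthWriteEnabled: true,
    depthCompare: 'greater', // the regular method would use 'less'
  },
});

const encoder = device.createCommandEncoder();
const pass = encoder.beginRenderPass({
  colorAttachments: [{
    view: context.getCurrentTexture().createView(),
    loadOp: 'clear',
    storeOp: 'store',
  }],
  depthStencilAttachment: {
    view: depthTexture.createView(),
    depthClearValue: 0.0, // regular clears to 1.0; reversed z clears to 0.0
    depthLoadOp: 'clear',
    depthStoreOp: 'store',
  },
});
// ...draw the planes with a projection matrix that maps near->1, far->0...
pass.end();
device.queue.submit([encoder.finish()]);
```

The precision win comes from how float32 values are distributed: they are densest near 0, which under reversed z is exactly where the heavily compressed far range of a perspective projection lands.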

\ No newline at end of file
+

\ No newline at end of file
diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html
index bc84fde0..7a383830 100644
--- a/samples/rotatingCube.html
+++ b/samples/rotatingCube.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Rotating Cube - WebGPU Samples
\ No newline at end of file
+ Rotating Cube - WebGPU Samples
\ No newline at end of file
diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html
index 54badffd..c4f8687b 100644
--- a/samples/samplerParameters.html
+++ b/samples/samplerParameters.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file
+ Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).
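For reference, the knobs the sample exposes map directly onto the GPUSampler descriptor. The values in this TypeScript sketch are arbitrary examples, not the sample's defaults, and `device` is assumed to be an already-acquired GPUDevice:

```ts
// One possible GPUSampler configuration; each field is a parameter the
// sample lets you vary and visualize.
const sampler = device.createSampler({
  addressModeU: 'repeat',  // or 'clamp-to-edge' / 'mirror-repeat'
  addressModeV: 'repeat',
  magFilter: 'linear',     // filtering when one texel covers many pixels
  minFilter: 'linear',     // filtering when many texels cover one pixel
  mipmapFilter: 'linear',  // blending between the 4 mip levels (16x16..2x2)
  lodMinClamp: 0,          // clamp which mip levels may be selected
  lodMaxClamp: 4,
  maxAnisotropy: 1,        // values > 1 enable anisotropic filtering
});
```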

\ No newline at end of file
diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html
index f38a0f6b..6bda6525 100644
--- a/samples/shadowMapping.html
+++ b/samples/shadowMapping.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Shadow Mapping - WebGPU Samples
\ No newline at end of file
+ Shadow Mapping - WebGPU Samples
\ No newline at end of file
diff --git a/samples/texturedCube.html b/samples/texturedCube.html
index f99b8a85..2e6c2483 100644
--- a/samples/texturedCube.html
+++ b/samples/texturedCube.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Textured Cube - WebGPU Samples
\ No newline at end of file
+ Textured Cube - WebGPU Samples
\ No newline at end of file
diff --git a/samples/twoCubes.html b/samples/twoCubes.html
index 24e13b2a..f4714e63 100644
--- a/samples/twoCubes.html
+++ b/samples/twoCubes.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Two Cubes - WebGPU Samples
\ No newline at end of file
+ Two Cubes - WebGPU Samples
\ No newline at end of file
diff --git a/samples/videoUploading.html b/samples/videoUploading.html
index 1406311a..70debd79 100644
--- a/samples/videoUploading.html
+++ b/samples/videoUploading.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Video Uploading - WebGPU Samples
\ No newline at end of file
+ Video Uploading - WebGPU Samples
\ No newline at end of file
diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html
index 011e5472..c6e8b383 100644
--- a/samples/videoUploadingWebCodecs.html
+++ b/samples/videoUploadingWebCodecs.html
@@ -8,4 +8,4 @@
height: auto !important;
overflow: visible !important;
}
- Video Uploading with WebCodecs - WebGPU Samples
\ No newline at end of file
+ Video Uploading with WebCodecs - WebGPU Samples
\ No newline at end of file
diff --git a/samples/worker.html b/samples/worker.html
index 43231b8a..41e694cd 100644
--- a/samples/worker.html
+++ b/samples/worker.html
@@ -10,6 +10,6 @@
}
WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on Github!

This example shows one method of using WebGPU in a web worker and presenting to
+ which is then transferred to the worker where all the WebGPU calls are made."/>

\ No newline at end of file
+ which is then transferred to the worker where all the WebGPU calls are made.
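The main-thread half of that handoff is small enough to sketch here. This is an illustrative TypeScript fragment, with 'worker.js' as a placeholder file name rather than the sample's actual entry point:

```ts
// Main thread: hand the canvas off so the worker owns all rendering.
const canvas = document.querySelector('canvas') as HTMLCanvasElement;
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('worker.js', { type: 'module' });
// The OffscreenCanvas must be listed as a transferable; trying to clone
// it instead of transferring it throws a DataCloneError.
worker.postMessage({ canvas: offscreen }, [offscreen]);

// Inside worker.js (sketch), the usual initialization runs unchanged:
// self.onmessage = async ({ data }) => {
//   const adapter = await navigator.gpu.requestAdapter();
//   const device = await adapter!.requestDevice();
//   const context = data.canvas.getContext('webgpu');
//   context.configure({
//     device,
//     format: navigator.gpu.getPreferredCanvasFormat(),
//   });
//   // ...render exactly as on the main thread...
// };
```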

\ No newline at end of file