diff --git a/404.html b/404.html index ab172f58..db151352 100644 --- a/404.html +++ b/404.html @@ -1,4 +1,4 @@

404: This page could not be found.

\ No newline at end of file diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/A-buffer.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/A-buffer.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/A-buffer.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/A-buffer.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/animometer.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/animometer.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/animometer.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/animometer.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/bitonicSort.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/bitonicSort.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/bitonicSort.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/bitonicSort.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/cameras.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cameras.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/cameras.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cameras.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/computeBoids.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/computeBoids.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/computeBoids.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/computeBoids.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/cornell.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cornell.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/cornell.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cornell.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/cubemap.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cubemap.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/cubemap.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/cubemap.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/deferredRendering.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/deferredRendering.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/deferredRendering.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/deferredRendering.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/fractalCube.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/fractalCube.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/fractalCube.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/fractalCube.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/gameOfLife.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/gameOfLife.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/gameOfLife.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/gameOfLife.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/helloTriangle.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/helloTriangle.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/helloTriangle.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/helloTriangle.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/helloTriangleMSAA.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/helloTriangleMSAA.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/helloTriangleMSAA.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/helloTriangleMSAA.json diff --git 
a/_next/data/kubTdYT8j6epjwt90LFc_/samples/imageBlur.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/imageBlur.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/imageBlur.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/imageBlur.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/instancedCube.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/instancedCube.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/instancedCube.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/instancedCube.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/normalMap.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/normalMap.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/normalMap.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/normalMap.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/particles.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/particles.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/particles.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/particles.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/renderBundles.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/renderBundles.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/renderBundles.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/renderBundles.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/resizeCanvas.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/resizeCanvas.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/resizeCanvas.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/resizeCanvas.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/reversedZ.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/reversedZ.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/reversedZ.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/reversedZ.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/rotatingCube.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/rotatingCube.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/rotatingCube.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/rotatingCube.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/samplerParameters.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/samplerParameters.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/samplerParameters.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/samplerParameters.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/shadowMapping.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/shadowMapping.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/shadowMapping.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/shadowMapping.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/texturedCube.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/texturedCube.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/texturedCube.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/texturedCube.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/twoCubes.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/twoCubes.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/twoCubes.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/twoCubes.json diff --git 
a/_next/data/kubTdYT8j6epjwt90LFc_/samples/videoUploading.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/videoUploading.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/videoUploading.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/videoUploading.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/videoUploadingWebCodecs.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/videoUploadingWebCodecs.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/videoUploadingWebCodecs.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/videoUploadingWebCodecs.json diff --git a/_next/data/kubTdYT8j6epjwt90LFc_/samples/worker.json b/_next/data/pZjCrhGJBHZXDy2WjAS6H/samples/worker.json similarity index 100% rename from _next/data/kubTdYT8j6epjwt90LFc_/samples/worker.json rename to _next/data/pZjCrhGJBHZXDy2WjAS6H/samples/worker.json diff --git a/_next/static/chunks/78.0ab12cc989705588.js b/_next/static/chunks/78.0ab12cc989705588.js new file mode 100644 index 00000000..b49aaaf3 --- /dev/null +++ b/_next/static/chunks/78.0ab12cc989705588.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[78],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return f},hu:function(){return d}});var r=t(5893),a=t(9008),o=t.n(a),i=t(1163),s=t(7294),c=t(9147),l=t.n(c);t(7319);let u=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let o=t(4631);n=o(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),f=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,i.useRouter)(),h=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,v]=(0,s.useState)(null),[w,g]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?g(h[1]):g(a[0].name),u&&c.current)for(c.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();d&&f.current&&(d.dom.style.position="absolute",d.showPanel(1),f.current.appendChild(d.dom));let t={active:!0},r=()=>{t.active=!1};try{let o=n.current;if(!o)throw Error("The canvas is not available");let i=e.init({canvas:o,pageState:t,gui:u,stats:d});i instanceof Promise&&i.catch(e=>{console.error(e),v(e)})}catch(s){console.error(s),v(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(o(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:f}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":w==e.name,onClick(){g(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":w==e.name},n))]})]})},f=e=>(0,r.jsx)(u,{...e});function d(e,n){if(!e)throw Error(n)}},6078:function(e,n,t){"use strict";var r="src/sample/worker/main.ts";t.r(n);var a=t(5671);let o=async e=>{let{canvas:n,pageState:r}=e;if(!r.active)return;let a=new Worker(t.tu(new URL(t.p+t.u(808),t.b)));a.addEventListener("message",e=>{e.data.type,console.error("Unknown Message Type: ".concat(e.data.type))});try{let o=n.transferControlToOffscreen(),i=window.devicePixelRatio;o.width=n.clientWidth*i,o.height=n.clientHeight*i,a.postMessage({type:"init",offscreenCanvas:o},[o])}catch(s){console.warn(s.message),a.terminate()}},i=()=>(0,a.Tl)({name:"WebGPU in a Worker",description:"This example shows one method of using WebGPU in a web worker and presenting to\n the main thread. It uses canvas.transferControlToOffscreen() to produce an offscreen canvas\n which is then transferred to the worker where all the WebGPU calls are made.",init:o,sources:[{name:r.substring(18),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n if (!pageState.active) return;\n\n // The web worker is created by passing a path to the worker's source file, which will then be\n // executed on a separate thread.\n const worker = new Worker(new URL('./worker.ts', import.meta.url));\n\n // The primary way to communicate with the worker is to send and receive messages.\n worker.addEventListener('message', (ev) => {\n // The format of the message can be whatever you'd like, but it's helpful to decide on a\n // consistent convention so that you can tell the message types apart as your apps grow in\n // complexity. Here we establish a convention that all messages to and from the worker will\n // have a `type` field that we can use to determine the content of the message.\n switch (ev.data.type) {\n default: {\n console.error(`Unknown Message Type: ${ev.data.type}`);\n }\n }\n });\n\n try {\n // In order for the worker to display anything on the page, an OffscreenCanvas must be used.\n // Here we can create one from our normal canvas by calling transferControlToOffscreen().\n // Anything drawn to the OffscreenCanvas that call returns will automatically be displayed on\n // the source canvas on the page.\n const offscreenCanvas = canvas.transferControlToOffscreen();\n const devicePixelRatio = window.devicePixelRatio;\n offscreenCanvas.width = canvas.clientWidth * devicePixelRatio;\n offscreenCanvas.height = canvas.clientHeight * devicePixelRatio;\n\n // Send a message to the worker telling it to initialize WebGPU with the OffscreenCanvas. 
The\n // array passed as the second argument here indicates that the OffscreenCanvas is to be\n // transferred to the worker, meaning this main thread will lose access to it and it will be\n // fully owned by the worker.\n worker.postMessage({ type: 'init', offscreenCanvas }, [offscreenCanvas]);\n } catch (err) {\n // TODO: This catch is added here because React will call init twice with the same canvas, and\n // the second time will fail the transferControlToOffscreen() because it's already been\n // transferred. I'd love to know how to get around that.\n console.warn(err.message);\n worker.terminate();\n }\n};\n\nconst WebGPUWorker: () => JSX.Element = () =>\n makeSample({\n name: 'WebGPU in a Worker',\n description: `This example shows one method of using WebGPU in a web worker and presenting to\n the main thread. It uses canvas.transferControlToOffscreen() to produce an offscreen canvas\n which is then transferred to the worker where all the WebGPU calls are made.`,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './worker.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./worker.ts').default,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../shaders/basic.vert.wgsl').default,\n },\n {\n name: '../../shaders/vertexPositionColor.frag.wgsl',\n contents:\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n require('!!raw-loader!../../shaders/vertexPositionColor.frag.wgsl')\n .default,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default WebGPUWorker;\n"},{name:"./worker.ts",contents:t(9168).Z},{name:"../../shaders/basic.vert.wgsl",contents:t(3569).Z},{name:"../../shaders/vertexPositionColor.frag.wgsl",contents:t(1945).Z},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:r});n.default=i},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 
-1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},9168:function(e,n){"use strict";n.Z="import { mat4, vec3 } from 'wgpu-matrix';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';\n\n// The worker process can instantiate a WebGPU device immediately, but it still needs an\n// OffscreenCanvas to be able to display anything. Here we listen for an 'init' message from the\n// main thread that will contain an OffscreenCanvas transferred from the page, and use that as the\n// signal to begin WebGPU initialization.\nself.addEventListener('message', (ev) => {\n switch (ev.data.type) {\n case 'init': {\n try {\n init(ev.data.offscreenCanvas);\n } catch (err) {\n console.error(\n `Error while initializing WebGPU in worker process: ${err.message}`\n );\n }\n break;\n }\n }\n});\n\n// Once we receive the OffscreenCanvas this init() function is called, which functions similarly\n// to the init() method for all the other samples. The remainder of this file is largely identical\n// to the rotatingCube sample.\nasync function init(canvas) {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n const context = canvas.getContext('webgpu');\n\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: vertexPositionColorWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the cube is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n // pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 * 16; // 4x4 matrix\n const 
uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(Math.sin(now), Math.cos(now), 0),\n 1,\n viewMatrix\n );\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n function frame() {\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n\n // Note: It is important to return control to the browser regularly in order for the worker to\n // process events. You shouldn't simply loop infinitely with while(true) or similar! 
Using a\n // traditional requestAnimationFrame() loop in the worker is one way to ensure that events are\n // handled correctly by the worker.\n requestAnimationFrame(frame);\n}\n\nexport {};\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"},1945:function(e,n){"use strict";n.Z="@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n return fragPosition;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/78.8a974dff973260d0.js b/_next/static/chunks/78.8a974dff973260d0.js deleted file mode 100644 index 25e075b9..00000000 --- a/_next/static/chunks/78.8a974dff973260d0.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[78],{5671:function(e,n,t){"use strict";t.d(n,{Tl:function(){return f},hu:function(){return d}});var r=t(5893),a=t(9008),o=t.n(a),i=t(1163),s=t(7294),c=t(9147),l=t.n(c);t(7319);let u=e=>{let n=(0,s.useRef)(null),a=(0,s.useMemo)(()=>e.sources.map(e=>{let{name:n,contents:a}=e;return{name:n,...function(e){let n;let a=null;{a=document.createElement("div");let o=t(4631);n=o(a,{lineNumbers:!0,lineWrapping:!0,theme:"monokai",readOnly:!0})}return{Container:function(t){return(0,r.jsx)("div",{...t,children:(0,r.jsx)("div",{ref(t){a&&t&&(t.appendChild(a),n.setOption("value",e))}})})}}}(a)}}),e.sources),c=(0,s.useRef)(null),u=(0,s.useMemo)(()=>{if(e.gui){let n=t(4376);return new n.GUI({autoPlace:!1})}},[]),f=(0,s.useRef)(null),d=(0,s.useMemo)(()=>{if(e.stats){let n=t(2792);return new n}},[]),m=(0,i.useRouter)(),h=m.asPath.match(/#([a-zA-Z0-9\.\/]+)/),[p,v]=(0,s.useState)(null),[g,w]=(0,s.useState)(null);return(0,s.useEffect)(()=>{if(h?w(h[1]):w(a[0].name),u&&c.current)for(c.current.appendChild(u.domElement);u.__controllers.length>0;)u.__controllers[0].remove();d&&f.current&&(d.dom.style.position="absolute",d.showPanel(1),f.current.appendChild(d.dom));let t={active:!0},r=()=>{t.active=!1};try{let o=n.current;if(!o)throw Error("The canvas is not available");let i=e.init({canvas:o,pageState:t,gui:u,stats:d});i instanceof Promise&&i.catch(e=>{console.error(e),v(e)})}catch(s){console.error(s),v(s)}return r},[]),(0,r.jsxs)("main",{children:[(0,r.jsxs)(o(),{children:[(0,r.jsx)("style",{dangerouslySetInnerHTML:{__html:"\n .CodeMirror {\n height: auto !important;\n margin: 1em 0;\n }\n\n .CodeMirror-scroll {\n height: auto !important;\n overflow: visible !important;\n }\n "}}),(0,r.jsx)("title",{children:"".concat(e.name," - WebGPU Samples")}),(0,r.jsx)("meta",{name:"description",content:e.description}),(0,r.jsx)("meta",{httpEquiv:"origin-trial",content:e.originTrial})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("h1",{children:e.name}),(0,r.jsx)("a",{target:"_blank",rel:"noreferrer",href:"https://github.com/".concat("webgpu/webgpu-samples","/tree/main/").concat(e.filename),children:"See it on Github!"}),(0,r.jsx)("p",{children:e.description}),p?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)("p",{children:"Something went wrong. 
Do your browser and device support WebGPU?"}),(0,r.jsx)("p",{children:"".concat(p)})]}):null]}),(0,r.jsxs)("div",{className:l().canvasContainer,children:[(0,r.jsx)("div",{style:{position:"absolute",left:10},ref:f}),(0,r.jsx)("div",{style:{position:"absolute",right:10},ref:c}),(0,r.jsx)("canvas",{ref:n})]}),(0,r.jsxs)("div",{children:[(0,r.jsx)("nav",{className:l().sourceFileNav,children:(0,r.jsx)("ul",{children:a.map((e,n)=>(0,r.jsx)("li",{children:(0,r.jsx)("a",{href:"#".concat(e.name),"data-active":g==e.name,onClick(){w(e.name)},children:e.name})},n))})}),a.map((e,n)=>(0,r.jsx)(e.Container,{className:l().sourceFileContainer,"data-active":g==e.name},n))]})]})},f=e=>(0,r.jsx)(u,{...e});function d(e,n){if(!e)throw Error(n)}},6078:function(e,n,t){"use strict";var r="src/sample/worker/main.ts";t.r(n);var a=t(5671);let o=async e=>{let{canvas:n,pageState:r}=e;if(!r.active)return;let a=new Worker(t.tu(new URL(t.p+t.u(808),t.b)));a.addEventListener("message",e=>{"log"===e.data.type?console.log(e.data.message):console.error("Unknown Message Type: ".concat(e.data.type))});try{let o=n.transferControlToOffscreen(),i=window.devicePixelRatio;o.width=n.clientWidth*i,o.height=n.clientHeight*i,a.postMessage({type:"init",offscreenCanvas:o},[o])}catch(s){console.warn(s.message),a.terminate()}},i=()=>(0,a.Tl)({name:"WebGPU in a Worker",description:"This example shows one method of using WebGPU in a web worker and presenting to\n the main thread. It uses canvas.transferControlToOffscreen() to produce an offscreen canvas\n which is then transferred to the worker where all the WebGPU calls are made.",init:o,sources:[{name:r.substring(18),contents:"import { makeSample, SampleInit } from '../../components/SampleLayout';\n\nconst init: SampleInit = async ({ canvas, pageState }) => {\n if (!pageState.active) return;\n\n // The web worker is created by passing a path to the worker's source file, which will then be\n // executed on a separate thread.\n const worker = new Worker(new URL('./worker.ts', import.meta.url));\n\n // The primary way to communicate with the worker is to send and receive messages.\n worker.addEventListener('message', (ev) => {\n // The format of the message can be whatever you'd like, but it's helpful to decide on a\n // consistent convention so that you can tell the message types apart as your apps grow in\n // complexity. Here we establish a convention that all messages to and from the worker will\n // have a `type` field that we can use to determine the content of the message.\n switch (ev.data.type) {\n case 'log': {\n // Workers don't have a built-in mechanism for logging to the console, so it's useful to\n // create a way to echo console messages.\n console.log(ev.data.message);\n break;\n }\n default: {\n console.error(`Unknown Message Type: ${ev.data.type}`);\n }\n }\n });\n\n try {\n // In order for the worker to display anything on the page, an OffscreenCanvas must be used.\n // Here we can create one from our normal canvas by calling transferControlToOffscreen().\n // Anything drawn to the OffscreenCanvas that call returns will automatically be displayed on\n // the source canvas on the page.\n const offscreenCanvas = canvas.transferControlToOffscreen();\n const devicePixelRatio = window.devicePixelRatio;\n offscreenCanvas.width = canvas.clientWidth * devicePixelRatio;\n offscreenCanvas.height = canvas.clientHeight * devicePixelRatio;\n\n // Send a message to the worker telling it to initialize WebGPU with the OffscreenCanvas. 
The\n // array passed as the second argument here indicates that the OffscreenCanvas is to be\n // transferred to the worker, meaning this main thread will lose access to it and it will be\n // fully owned by the worker.\n worker.postMessage({ type: 'init', offscreenCanvas }, [offscreenCanvas]);\n } catch (err) {\n // TODO: This catch is added here because React will call init twice with the same canvas, and\n // the second time will fail the transferControlToOffscreen() because it's already been\n // transferred. I'd love to know how to get around that.\n console.warn(err.message);\n worker.terminate();\n }\n};\n\nconst WebGPUWorker: () => JSX.Element = () =>\n makeSample({\n name: 'WebGPU in a Worker',\n description: `This example shows one method of using WebGPU in a web worker and presenting to\n the main thread. It uses canvas.transferControlToOffscreen() to produce an offscreen canvas\n which is then transferred to the worker where all the WebGPU calls are made.`,\n init,\n sources: [\n {\n name: __filename.substring(__dirname.length + 1),\n contents: __SOURCE__,\n },\n {\n name: './worker.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!./worker.ts').default,\n },\n {\n name: '../../shaders/basic.vert.wgsl',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../shaders/basic.vert.wgsl').default,\n },\n {\n name: '../../shaders/vertexPositionColor.frag.wgsl',\n contents:\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n require('!!raw-loader!../../shaders/vertexPositionColor.frag.wgsl')\n .default,\n },\n {\n name: '../../meshes/cube.ts',\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n contents: require('!!raw-loader!../../meshes/cube.ts').default,\n },\n ],\n filename: __filename,\n });\n\nexport default WebGPUWorker;\n"},{name:"./worker.ts",contents:t(9168).Z},{name:"../../shaders/basic.vert.wgsl",contents:t(3569).Z},{name:"../../shaders/vertexPositionColor.frag.wgsl",contents:t(1945).Z},{name:"../../meshes/cube.ts",contents:t(2448).Z}],filename:r});n.default=i},9147:function(e){e.exports={canvasContainer:"SampleLayout_canvasContainer__zRR_l",sourceFileNav:"SampleLayout_sourceFileNav__ml48P",sourceFileContainer:"SampleLayout_sourceFileContainer__3s84x"}},2448:function(e,n){"use strict";n.Z="export const cubeVertexSize = 4 * 10; // Byte size of one cube vertex.\nexport const cubePositionOffset = 0;\nexport const cubeColorOffset = 4 * 4; // Byte offset of cube vertex color attribute.\nexport const cubeUVOffset = 4 * 8;\nexport const cubeVertexCount = 36;\n\n// prettier-ignore\nexport const cubeVertexArray = new Float32Array([\n // float4 position, float4 color, float2 uv,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, 1, 1, 1, 0, 1, 1, 1, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, -1, -1, 1, 1, 0, 0, 1, 1, 0,\n\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n -1, 1, -1, 1, 0, 1, 0, 1, 0, 0,\n -1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\n 1, 1, -1, 1, 1, 1, 0, 1, 1, 0,\n\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n -1, -1, -1, 1, 0, 0, 0, 1, 0, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 0, 1,\n -1, 1, 
-1, 1, 0, 1, 0, 1, 1, 0,\n\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n -1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n -1, -1, 1, 1, 0, 0, 1, 1, 1, 0,\n 1, -1, 1, 1, 1, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, -1, -1, 1, 0, 0, 0, 1, 1, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n 1, 1, -1, 1, 1, 1, 0, 1, 0, 0,\n 1, -1, -1, 1, 1, 0, 0, 1, 0, 1,\n -1, 1, -1, 1, 0, 1, 0, 1, 1, 0,\n]);\n"},9168:function(e,n){"use strict";n.Z="import { mat4, vec3 } from 'wgpu-matrix';\n\nimport {\n cubeVertexArray,\n cubeVertexSize,\n cubeUVOffset,\n cubePositionOffset,\n cubeVertexCount,\n} from '../../meshes/cube';\n\nimport basicVertWGSL from '../../shaders/basic.vert.wgsl';\nimport vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';\n\n// The worker process can instantiate a WebGPU device immediately, but it still needs an\n// OffscreenCanvas to be able to display anything. Here we listen for an 'init' message from the\n// main thread that will contain an OffscreenCanvas transferred from the page, and use that as the\n// signal to begin WebGPU initialization.\nself.addEventListener('message', (ev) => {\n switch (ev.data.type) {\n case 'init': {\n try {\n init(ev.data.offscreenCanvas);\n } catch (err) {\n self.postMessage({\n type: 'log',\n message: `Error while initializing WebGPU in worker process: ${err.message}`,\n });\n }\n break;\n }\n }\n});\n\n// Once we receive the OffscreenCanvas this init() function is called, which functions similarly\n// to the init() method for all the other samples. The remainder of this file is largely identical\n// to the rotatingCube sample.\nasync function init(canvas) {\n const adapter = await navigator.gpu.requestAdapter();\n const device = await adapter.requestDevice();\n const context = canvas.getContext('webgpu');\n\n const presentationFormat = navigator.gpu.getPreferredCanvasFormat();\n\n context.configure({\n device,\n format: presentationFormat,\n alphaMode: 'premultiplied',\n });\n\n // Create a vertex buffer from the cube data.\n const verticesBuffer = device.createBuffer({\n size: cubeVertexArray.byteLength,\n usage: GPUBufferUsage.VERTEX,\n mappedAtCreation: true,\n });\n new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);\n verticesBuffer.unmap();\n\n const pipeline = device.createRenderPipeline({\n layout: 'auto',\n vertex: {\n module: device.createShaderModule({\n code: basicVertWGSL,\n }),\n entryPoint: 'main',\n buffers: [\n {\n arrayStride: cubeVertexSize,\n attributes: [\n {\n // position\n shaderLocation: 0,\n offset: cubePositionOffset,\n format: 'float32x4',\n },\n {\n // uv\n shaderLocation: 1,\n offset: cubeUVOffset,\n format: 'float32x2',\n },\n ],\n },\n ],\n },\n fragment: {\n module: device.createShaderModule({\n code: vertexPositionColorWGSL,\n }),\n entryPoint: 'main',\n targets: [\n {\n format: presentationFormat,\n },\n ],\n },\n primitive: {\n topology: 'triangle-list',\n\n // Backface culling since the cube is solid piece of geometry.\n // Faces pointing away from the camera will be occluded by faces\n // pointing toward the camera.\n cullMode: 'back',\n },\n\n // Enable depth testing so that the fragment closest to the camera\n // is rendered in front.\n depthStencil: {\n depthWriteEnabled: true,\n depthCompare: 'less',\n format: 'depth24plus',\n },\n });\n\n const depthTexture = device.createTexture({\n size: [canvas.width, canvas.height],\n format: 'depth24plus',\n usage: GPUTextureUsage.RENDER_ATTACHMENT,\n });\n\n const uniformBufferSize = 4 
* 16; // 4x4 matrix\n const uniformBuffer = device.createBuffer({\n size: uniformBufferSize,\n usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,\n });\n\n const uniformBindGroup = device.createBindGroup({\n layout: pipeline.getBindGroupLayout(0),\n entries: [\n {\n binding: 0,\n resource: {\n buffer: uniformBuffer,\n },\n },\n ],\n });\n\n const renderPassDescriptor: GPURenderPassDescriptor = {\n colorAttachments: [\n {\n view: undefined, // Assigned later\n\n clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },\n loadOp: 'clear',\n storeOp: 'store',\n },\n ],\n depthStencilAttachment: {\n view: depthTexture.createView(),\n\n depthClearValue: 1.0,\n depthLoadOp: 'clear',\n depthStoreOp: 'store',\n },\n };\n\n const aspect = canvas.width / canvas.height;\n const projectionMatrix = mat4.perspective(\n (2 * Math.PI) / 5,\n aspect,\n 1,\n 100.0\n );\n const modelViewProjectionMatrix = mat4.create();\n\n function getTransformationMatrix() {\n const viewMatrix = mat4.identity();\n mat4.translate(viewMatrix, vec3.fromValues(0, 0, -4), viewMatrix);\n const now = Date.now() / 1000;\n mat4.rotate(\n viewMatrix,\n vec3.fromValues(Math.sin(now), Math.cos(now), 0),\n 1,\n viewMatrix\n );\n\n mat4.multiply(projectionMatrix, viewMatrix, modelViewProjectionMatrix);\n\n return modelViewProjectionMatrix as Float32Array;\n }\n\n function frame() {\n const transformationMatrix = getTransformationMatrix();\n device.queue.writeBuffer(\n uniformBuffer,\n 0,\n transformationMatrix.buffer,\n transformationMatrix.byteOffset,\n transformationMatrix.byteLength\n );\n renderPassDescriptor.colorAttachments[0].view = context\n .getCurrentTexture()\n .createView();\n\n const commandEncoder = device.createCommandEncoder();\n const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);\n passEncoder.setPipeline(pipeline);\n passEncoder.setBindGroup(0, uniformBindGroup);\n passEncoder.setVertexBuffer(0, verticesBuffer);\n passEncoder.draw(cubeVertexCount);\n passEncoder.end();\n device.queue.submit([commandEncoder.finish()]);\n\n requestAnimationFrame(frame);\n }\n\n // Note: It is important to return control to the browser regularly in order for the worker to\n // process events. You shouldn't simply loop infinitely with while(true) or similar! 
Using a\n // traditional requestAnimationFrame() loop in the worker is one way to ensure that events are\n // handled correctly by the worker.\n requestAnimationFrame(frame);\n}\n\nexport {};\n"},3569:function(e,n){"use strict";n.Z="struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"},1945:function(e,n){"use strict";n.Z="@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n return fragPosition;\n}\n"}}]); \ No newline at end of file diff --git a/_next/static/chunks/808.80bdfd2012be5144.js b/_next/static/chunks/808.b76a8a51b05711fb.js similarity index 59% rename from _next/static/chunks/808.80bdfd2012be5144.js rename to _next/static/chunks/808.b76a8a51b05711fb.js index 95d1e663..a834cb7b 100644 --- a/_next/static/chunks/808.80bdfd2012be5144.js +++ b/_next/static/chunks/808.b76a8a51b05711fb.js @@ -1 +1 @@ -!function(){var e,t,r,n,o,i,a,u={808:function(e,t,r){"use strict";var n=r(6416);let o=new Float32Array([1,-1,1,1,1,0,1,1,0,1,-1,-1,1,1,0,0,1,1,1,1,-1,-1,-1,1,0,0,0,1,1,0,1,-1,-1,1,1,0,0,1,0,0,1,-1,1,1,1,0,1,1,0,1,-1,-1,-1,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,0,1,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,1,0,-1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,1,1,1,0,1,1,0,-1,1,-1,1,0,1,0,1,0,0,-1,1,1,1,0,1,1,1,0,1,1,1,-1,1,1,1,0,1,1,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,1,-1,1,0,1,0,1,1,0,-1,-1,-1,1,0,0,0,1,0,0,-1,-1,1,1,0,0,1,1,0,1,-1,1,-1,1,0,1,0,1,1,0,1,1,1,1,1,1,1,1,0,1,-1,1,1,1,0,1,1,1,1,1,-1,-1,1,1,0,0,1,1,1,0,-1,-1,1,1,0,0,1,1,1,0,1,-1,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,-1,-1,1,1,0,0,1,0,1,-1,-1,-1,1,0,0,0,1,1,1,-1,1,-1,1,0,1,0,1,1,0,1,1,-1,1,1,1,0,1,0,0,1,-1,-1,1,1,0,0,1,0,1,-1,1,-1,1,0,1,0,1,1,0]);async function i(e){let t=await navigator.gpu.requestAdapter(),r=await t.requestDevice(),i=e.getContext("webgpu"),a=navigator.gpu.getPreferredCanvasFormat();i.configure({device:r,format:a,alphaMode:"premultiplied"});let u=r.createBuffer({size:o.byteLength,usage:GPUBufferUsage.VERTEX,mappedAtCreation:!0});new Float32Array(u.getMappedRange()).set(o),u.unmap();let s=r.createRenderPipeline({layout:"auto",vertex:{module:r.createShaderModule({code:"struct Uniforms {\n modelViewProjectionMatrix : mat4x4,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}),entryPoint:"main",buffers:[{arrayStride:40,attributes:[{shaderLocation:0,offset:0,format:"float32x4"},{shaderLocation:1,offset:32,format:"float32x2"}]}]},fragment:{module:r.createShaderModule({code:"@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n 
return fragPosition;\n}\n"}),entryPoint:"main",targets:[{format:a}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),f=r.createTexture({size:[e.width,e.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),c=r.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),l=r.createBindGroup({layout:s.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:c}}]}),p={colorAttachments:[{view:void 0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:f.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},d=e.width/e.height,g=n._E.perspective(2*Math.PI/5,d,1,100),m=n._E.create();requestAnimationFrame(function e(){let t=function(){let e=n._E.identity();n._E.translate(e,n.R3.fromValues(0,0,-4),e);let t=Date.now()/1e3;return n._E.rotate(e,n.R3.fromValues(Math.sin(t),Math.cos(t),0),1,e),n._E.multiply(g,e,m),m}();r.queue.writeBuffer(c,0,t.buffer,t.byteOffset,t.byteLength),p.colorAttachments[0].view=i.getCurrentTexture().createView();let o=r.createCommandEncoder(),a=o.beginRenderPass(p);a.setPipeline(s),a.setBindGroup(0,l),a.setVertexBuffer(0,u),a.draw(36),a.end(),r.queue.submit([o.finish()]),requestAnimationFrame(e)})}self.addEventListener("message",e=>{if("init"===e.data.type)try{i(e.data.offscreenCanvas)}catch(t){self.postMessage({type:"log",message:"Error while initializing WebGPU in worker process: ".concat(t.message)})}})}},s={};function f(e){var t=s[e];if(void 0!==t)return t.exports;var r=s[e]={exports:{}},n=!0;try{u[e](r,r.exports,f),n=!1}finally{n&&delete s[e]}return r.exports}f.m=u,f.x=function(){var e=f.O(void 0,[746],function(){return f(808)});return f.O(e)},e=[],f.O=function(t,r,n,o){if(r){o=o||0;for(var i=e.length;i>0&&e[i-1][2]>o;i--)e[i]=e[i-1];e[i]=[r,n,o];return}for(var a=1/0,i=0;i=o&&Object.keys(f.O).every(function(e){return f.O[e](r[s])})?r.splice(s--,1):(u=!1,o,\n}\n@binding(0) @group(0) var uniforms : Uniforms;\n\nstruct VertexOutput {\n @builtin(position) Position : vec4,\n @location(0) fragUV : vec2,\n @location(1) fragPosition: vec4,\n}\n\n@vertex\nfn main(\n @location(0) position : vec4,\n @location(1) uv : vec2\n) -> VertexOutput {\n var output : VertexOutput;\n output.Position = uniforms.modelViewProjectionMatrix * position;\n output.fragUV = uv;\n output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));\n return output;\n}\n"}),entryPoint:"main",buffers:[{arrayStride:40,attributes:[{shaderLocation:0,offset:0,format:"float32x4"},{shaderLocation:1,offset:32,format:"float32x2"}]}]},fragment:{module:r.createShaderModule({code:"@fragment\nfn main(\n @location(0) fragUV: vec2,\n @location(1) fragPosition: vec4\n) -> @location(0) vec4 {\n return fragPosition;\n}\n"}),entryPoint:"main",targets:[{format:a}]},primitive:{topology:"triangle-list",cullMode:"back"},depthStencil:{depthWriteEnabled:!0,depthCompare:"less",format:"depth24plus"}}),s=r.createTexture({size:[e.width,e.height],format:"depth24plus",usage:GPUTextureUsage.RENDER_ATTACHMENT}),c=r.createBuffer({size:64,usage:GPUBufferUsage.UNIFORM|GPUBufferUsage.COPY_DST}),l=r.createBindGroup({layout:f.getBindGroupLayout(0),entries:[{binding:0,resource:{buffer:c}}]}),p={colorAttachments:[{view:void 
0,clearValue:{r:.5,g:.5,b:.5,a:1},loadOp:"clear",storeOp:"store"}],depthStencilAttachment:{view:s.createView(),depthClearValue:1,depthLoadOp:"clear",depthStoreOp:"store"}},d=e.width/e.height,m=n._E.perspective(2*Math.PI/5,d,1,100),g=n._E.create();requestAnimationFrame(function e(){let t=function(){let e=n._E.identity();n._E.translate(e,n.R3.fromValues(0,0,-4),e);let t=Date.now()/1e3;return n._E.rotate(e,n.R3.fromValues(Math.sin(t),Math.cos(t),0),1,e),n._E.multiply(m,e,g),g}();r.queue.writeBuffer(c,0,t.buffer,t.byteOffset,t.byteLength),p.colorAttachments[0].view=i.getCurrentTexture().createView();let o=r.createCommandEncoder(),a=o.beginRenderPass(p);a.setPipeline(f),a.setBindGroup(0,l),a.setVertexBuffer(0,u),a.draw(36),a.end(),r.queue.submit([o.finish()]),requestAnimationFrame(e)})}self.addEventListener("message",e=>{if("init"===e.data.type)try{i(e.data.offscreenCanvas)}catch(t){console.error("Error while initializing WebGPU in worker process: ".concat(t.message))}})}},f={};function s(e){var t=f[e];if(void 0!==t)return t.exports;var r=f[e]={exports:{}},n=!0;try{u[e](r,r.exports,s),n=!1}finally{n&&delete f[e]}return r.exports}s.m=u,s.x=function(){var e=s.O(void 0,[746],function(){return s(808)});return s.O(e)},e=[],s.O=function(t,r,n,o){if(r){o=o||0;for(var i=e.length;i>0&&e[i-1][2]>o;i--)e[i]=e[i-1];e[i]=[r,n,o];return}for(var a=1/0,i=0;i=o&&Object.keys(s.O).every(function(e){return s.O[e](r[f])})?r.splice(f--,1):(u=!1,o0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var o=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,f0&&e[a-1][2]>f;a--)e[a]=e[a-1];e[a]=[r,n,f];return}for(var o=1/0,a=0;a=f&&Object.keys(l.O).every(function(e){return l.O[e](r[i])})?r.splice(i--,1):(c=!1,fWebGPU Samples \ No newline at end of file +WebGPU Samples \ No newline at end of file diff --git a/samples/A-buffer.html b/samples/A-buffer.html index eff25221..1e1b10a6 100644 --- a/samples/A-buffer.html +++ b/samples/A-buffer.html @@ -10,6 +10,6 @@ } A-Buffer - WebGPU Samples

A-Buffer

See it on Github!

Demonstrates order independent transparency using a per-pixel + limiting memory usage (when required)."/>

\ No newline at end of file + limiting memory usage (when required).

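The A-buffer hunk above only carries the sample's meta description, but the technique it names is concrete enough to sketch. Below is a minimal TypeScript sketch of the storage buffers a per-pixel linked list of translucent fragments needs, assuming `@webgpu/types`; the node layout, the `averageFragmentsPerPixel` heuristic, and all names are illustrative assumptions, not the sample's actual values.

```ts
// Sketch: storage buffers for a per-pixel linked list (A-buffer) of
// translucent fragments. Capping the node pool is what bounds memory use.
function createABufferResources(
  device: GPUDevice,
  width: number,
  height: number,
  averageFragmentsPerPixel = 4 // illustrative heuristic
) {
  // One u32 "head pointer" per pixel; cleared to 0xffffffff (empty list)
  // each frame via writeBuffer or a small compute pass.
  const heads = device.createBuffer({
    size: width * height * 4,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
  });

  // Node pool: each node stores a packed color, an f32 depth, and a u32
  // "next" pointer (12 bytes in this assumed layout).
  const nodeSize = 4 + 4 + 4;
  const linkedList = device.createBuffer({
    size: width * height * averageFragmentsPerPixel * nodeSize,
    usage: GPUBufferUsage.STORAGE,
  });

  // Atomic allocation counter for the node pool; the fragment shader bumps
  // it and discards nodes past the cap when the scene overflows the budget.
  const counter = device.createBuffer({
    size: 4,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
  });

  return { heads, linkedList, counter };
}
```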
\ No newline at end of file diff --git a/samples/animometer.html b/samples/animometer.html index e3980043..326be2c2 100644 --- a/samples/animometer.html +++ b/samples/animometer.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Animometer - WebGPU Samples \ No newline at end of file + Animometer - WebGPU Samples \ No newline at end of file diff --git a/samples/bitonicSort.html b/samples/bitonicSort.html index 531e0452..6a6b7d9a 100644 --- a/samples/bitonicSort.html +++ b/samples/bitonicSort.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each invocation of the bitonic sort shader dispatches a workgroup containing elements/2 threads. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

\ No newline at end of file + Bitonic Sort - WebGPU Samples

Bitonic Sort

See it on Github!

A naive bitonic sort algorithm executed on the GPU, based on tgfrerer's implementation at poniesandlight.co.uk/reflect/bitonic_merge_sort/. Each invocation of the bitonic sort shader dispatches a workgroup containing elements/2 threads. The GUI's Execution Information folder contains information about the sort's current state. The visualizer displays the sort's results as colored cells sorted from brightest to darkest.

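The bitonic-sort description above pins down the dispatch shape: a single workgroup of elements/2 invocations covers the whole array, with each invocation comparing one pair. A sketch of how one sort step would be encoded, assuming the compute pipeline and bind group already exist (the names here are hypothetical):

```ts
// Sketch: encode one bitonic-sort step. `pipeline` wraps the sort shader,
// whose WGSL declares the workgroup size (elements / 2) itself;
// `dataBindGroup` binds the storage buffer being sorted.
function encodeSortStep(
  device: GPUDevice,
  pipeline: GPUComputePipeline,
  dataBindGroup: GPUBindGroup
): void {
  const encoder = device.createCommandEncoder();
  const pass = encoder.beginComputePass();
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, dataBindGroup);
  // One workgroup of elements / 2 invocations, per the description above;
  // each invocation compares and possibly swaps one pair of elements.
  pass.dispatchWorkgroups(1);
  pass.end();
  device.queue.submit([encoder.finish()]);
}
```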
\ No newline at end of file diff --git a/samples/cameras.html b/samples/cameras.html index 478cb5ea..eef67ee5 100644 --- a/samples/cameras.html +++ b/samples/cameras.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cameras - WebGPU Samples \ No newline at end of file + Cameras - WebGPU Samples \ No newline at end of file diff --git a/samples/computeBoids.html b/samples/computeBoids.html index 362f6dd5..93494f75 100644 --- a/samples/computeBoids.html +++ b/samples/computeBoids.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Compute Boids - WebGPU Samples \ No newline at end of file + Compute Boids - WebGPU Samples \ No newline at end of file diff --git a/samples/cornell.html b/samples/cornell.html index e7c08b35..7876eadc 100644 --- a/samples/cornell.html +++ b/samples/cornell.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cornell box - WebGPU Samples \ No newline at end of file + Cornell box - WebGPU Samples \ No newline at end of file diff --git a/samples/cubemap.html b/samples/cubemap.html index 3807a1c3..c9cdc33c 100644 --- a/samples/cubemap.html +++ b/samples/cubemap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Cubemap - WebGPU Samples \ No newline at end of file + Cubemap - WebGPU Samples \ No newline at end of file diff --git a/samples/deferredRendering.html b/samples/deferredRendering.html index 1ce31bea..b9d87bdb 100644 --- a/samples/deferredRendering.html +++ b/samples/deferredRendering.html @@ -16,7 +16,7 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. - "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu. + "/>

Deferred Rendering

See it on Github!

This example shows how to do deferred rendering with webgpu. Render geometry info to multiple targets in the gBuffers in the first pass. In this sample we have 2 gBuffers for normals and albedo, along with a depth texture. And then do the lighting in a second pass with per fragment data read from gBuffers so it's independent of scene complexity. @@ -24,4 +24,4 @@ We also update light position in a compute shader, where further operations like tile/cluster culling could happen. The debug view shows the depth buffer on the left (flipped and scaled a bit to make it more visible), the normal G buffer in the middle, and the albedo G-buffer on the right side of the screen. -

\ No newline at end of file +

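The deferred-rendering description enumerates its attachments: two G-buffers (normals and albedo) plus a depth texture, written in the geometry pass and read per fragment in the lighting pass. A sketch of that allocation follows; the formats are plausible choices for illustration, not necessarily the sample's exact ones.

```ts
// Sketch: allocate the G-buffer targets the description mentions. Each is
// both a render attachment (geometry pass) and a sampled texture
// (lighting pass).
function createGBuffers(device: GPUDevice, width: number, height: number) {
  const usage =
    GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING;
  const normals = device.createTexture({
    size: [width, height],
    format: 'rgba16float', // assumed format
    usage,
  });
  const albedo = device.createTexture({
    size: [width, height],
    format: 'bgra8unorm', // assumed format
    usage,
  });
  // Written as the depth attachment in the geometry pass, then bound as a
  // depth texture in the lighting pass (and shown flipped/scaled in the
  // debug view).
  const depth = device.createTexture({
    size: [width, height],
    format: 'depth24plus',
    usage,
  });
  return { normals, albedo, depth };
}
```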
\ No newline at end of file diff --git a/samples/fractalCube.html b/samples/fractalCube.html index 0a20255b..c42f2381 100644 --- a/samples/fractalCube.html +++ b/samples/fractalCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Fractal Cube - WebGPU Samples \ No newline at end of file + Fractal Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/gameOfLife.html b/samples/gameOfLife.html index cf9dc92e..89b21878 100644 --- a/samples/gameOfLife.html +++ b/samples/gameOfLife.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Conway's Game of Life - WebGPU Samples \ No newline at end of file + Conway's Game of Life - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangle.html b/samples/helloTriangle.html index a679f41b..8813bb37 100644 --- a/samples/helloTriangle.html +++ b/samples/helloTriangle.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle - WebGPU Samples \ No newline at end of file + Hello Triangle - WebGPU Samples \ No newline at end of file diff --git a/samples/helloTriangleMSAA.html b/samples/helloTriangleMSAA.html index 4386fd3c..c5246391 100644 --- a/samples/helloTriangleMSAA.html +++ b/samples/helloTriangleMSAA.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Hello Triangle MSAA - WebGPU Samples \ No newline at end of file + Hello Triangle MSAA - WebGPU Samples \ No newline at end of file diff --git a/samples/imageBlur.html b/samples/imageBlur.html index e62b62de..5a7203f1 100644 --- a/samples/imageBlur.html +++ b/samples/imageBlur.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Image Blur - WebGPU Samples \ No newline at end of file + Image Blur - WebGPU Samples \ No newline at end of file diff --git a/samples/instancedCube.html b/samples/instancedCube.html index 3f3e11b7..c7520afe 100644 --- a/samples/instancedCube.html +++ b/samples/instancedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Instanced Cube - WebGPU Samples \ No newline at end of file + Instanced Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/normalMap.html b/samples/normalMap.html index 27a531ea..73be4169 100644 --- a/samples/normalMap.html +++ b/samples/normalMap.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Normal Mapping - WebGPU Samples \ No newline at end of file + Normal Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/particles.html b/samples/particles.html index 798929f1..d9289f54 100644 --- a/samples/particles.html +++ b/samples/particles.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Particles - WebGPU Samples \ No newline at end of file + Particles - WebGPU Samples \ No newline at end of file diff --git a/samples/renderBundles.html b/samples/renderBundles.html index 19bd216d..a8836ca8 100644 --- a/samples/renderBundles.html +++ b/samples/renderBundles.html @@ -11,7 +11,7 @@ Render Bundles - WebGPU Samples

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of + of instancing to reduce draw overhead.)"/>

Render Bundles

See it on Github!

This example shows how to use render bundles. It renders a large number of meshes individually as a proxy for a more complex scene in order to demonstrate the reduction in JavaScript time spent to issue render commands. (Typically a scene like this would make use - of instancing to reduce draw overhead.)

\ No newline at end of file + of instancing to reduce draw overhead.)

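Render bundles are worth a concrete sketch, since the description's whole point is moving per-draw JavaScript cost out of the frame loop: the draws are encoded once and replayed each frame. The pipeline, bind group, and vertex buffer below are assumed to exist already (hypothetical names).

```ts
// Sketch: record the scene's draws once into a bundle, replay it per frame.
function recordSceneBundle(
  device: GPUDevice,
  colorFormat: GPUTextureFormat,
  pipeline: GPURenderPipeline,
  bindGroup: GPUBindGroup,
  vertices: GPUBuffer,
  vertexCount: number
): GPURenderBundle {
  const encoder = device.createRenderBundleEncoder({
    colorFormats: [colorFormat],
    depthStencilFormat: 'depth24plus', // must match the target render pass
  });
  encoder.setPipeline(pipeline);
  encoder.setBindGroup(0, bindGroup);
  encoder.setVertexBuffer(0, vertices);
  encoder.draw(vertexCount);
  return encoder.finish();
}

// Each frame, inside a render pass whose formats match the encoder above:
//   passEncoder.executeBundles([bundle]);
```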
\ No newline at end of file diff --git a/samples/resizeCanvas.html b/samples/resizeCanvas.html index 17c64895..4bf17783 100644 --- a/samples/resizeCanvas.html +++ b/samples/resizeCanvas.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Resize Canvas - WebGPU Samples \ No newline at end of file + Resize Canvas - WebGPU Samples \ No newline at end of file diff --git a/samples/reversedZ.html b/samples/reversedZ.html index 2771ed0b..9b24a542 100644 --- a/samples/reversedZ.html +++ b/samples/reversedZ.html @@ -17,7 +17,7 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ - "/>

Reversed Z

See it on Github!

This example shows the use of reversed z technique for better utilization of depth buffer precision. + "/>

Reversed Z

See it on Github!

This example shows the use of reversed z technique for better utilization of depth buffer precision. The left column uses regular method, while the right one uses reversed z technique. Both are using depth32float as their depth buffer format. A set of red and green planes are positioned very close to each other. Higher sets are placed further from camera (and are scaled for better visual purpose). @@ -26,4 +26,4 @@ Related reading: https://developer.nvidia.com/content/depth-precision-visualized https://thxforthefish.com/posts/reverse_z/ -

\ No newline at end of file +

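The reversed-Z description translates into three pipeline-level changes. A sketch is below, with `depth32float` taken from the description itself; comments note what the regular method would use instead. (The projection matrix must also map near to 1 and far to 0, which is library-dependent and omitted here.)

```ts
// Sketch: the depth-state changes reversed Z requires.
const depthStencil: GPUDepthStencilState = {
  format: 'depth32float', // the format the description says both columns use
  depthWriteEnabled: true,
  depthCompare: 'greater', // regular method: 'less'
};

// Depth attachment settings for the render pass descriptor:
const depthAttachmentSettings = {
  depthClearValue: 0.0, // regular method clears to 1.0; far plane is now 0
  depthLoadOp: 'clear' as GPULoadOp,
  depthStoreOp: 'store' as GPUStoreOp,
};
```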
\ No newline at end of file diff --git a/samples/rotatingCube.html b/samples/rotatingCube.html index 7b19bef0..756ffb42 100644 --- a/samples/rotatingCube.html +++ b/samples/rotatingCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Rotating Cube - WebGPU Samples \ No newline at end of file + Rotating Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/samplerParameters.html b/samples/samplerParameters.html index 72f4d4ca..9772fded 100644 --- a/samples/samplerParameters.html +++ b/samples/samplerParameters.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

\ No newline at end of file + Sampler Parameters - WebGPU Samples

Sampler Parameters

See it on Github!

Visualizes what all the sampler parameters do. Shows a textured plane at various scales (rotated, head-on, in perspective, and in vanishing perspective). The bottom-right view shows the raw contents of the 4 mipmap levels of the test texture (16x16, 8x8, 4x4, and 2x2).

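For reference alongside the sampler-parameters description, these are the knobs `GPUSamplerDescriptor` exposes, with the spec defaults noted in comments. The specific values chosen here are illustrative, assuming a `GPUDevice` named `device`.

```ts
declare const device: GPUDevice; // assumed to exist

// Sketch: the sampler parameters the sample's GUI can vary.
const sampler = device.createSampler({
  addressModeU: 'repeat', // default: 'clamp-to-edge'
  addressModeV: 'repeat',
  addressModeW: 'clamp-to-edge',
  magFilter: 'linear', // default: 'nearest'
  minFilter: 'linear',
  mipmapFilter: 'linear', // selects among the 16x16…2x2 mip levels shown
  lodMinClamp: 0,
  lodMaxClamp: 4,
  maxAnisotropy: 1, // values > 1 require all three filters to be 'linear'
});
```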
\ No newline at end of file diff --git a/samples/shadowMapping.html b/samples/shadowMapping.html index 29714345..0168f1e8 100644 --- a/samples/shadowMapping.html +++ b/samples/shadowMapping.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Shadow Mapping - WebGPU Samples \ No newline at end of file + Shadow Mapping - WebGPU Samples \ No newline at end of file diff --git a/samples/texturedCube.html b/samples/texturedCube.html index 11516d99..2af5a8ac 100644 --- a/samples/texturedCube.html +++ b/samples/texturedCube.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Textured Cube - WebGPU Samples \ No newline at end of file + Textured Cube - WebGPU Samples \ No newline at end of file diff --git a/samples/twoCubes.html b/samples/twoCubes.html index bd66e071..72c60d7c 100644 --- a/samples/twoCubes.html +++ b/samples/twoCubes.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Two Cubes - WebGPU Samples \ No newline at end of file + Two Cubes - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploading.html b/samples/videoUploading.html index b3328ab7..4c07557e 100644 --- a/samples/videoUploading.html +++ b/samples/videoUploading.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading - WebGPU Samples \ No newline at end of file + Video Uploading - WebGPU Samples \ No newline at end of file diff --git a/samples/videoUploadingWebCodecs.html b/samples/videoUploadingWebCodecs.html index 3a7dcf17..1d6b8bac 100644 --- a/samples/videoUploadingWebCodecs.html +++ b/samples/videoUploadingWebCodecs.html @@ -8,4 +8,4 @@ height: auto !important; overflow: visible !important; } - Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file + Video Uploading with WebCodecs - WebGPU Samples \ No newline at end of file diff --git a/samples/worker.html b/samples/worker.html index c53010d2..0db5b6fa 100644 --- a/samples/worker.html +++ b/samples/worker.html @@ -10,6 +10,6 @@ } WebGPU in a Worker - WebGPU Samples

WebGPU in a Worker

See it on Github!

This example shows one method of using WebGPU in a web worker and presenting to + which is then transferred to the worker where all the WebGPU calls are made."/>

\ No newline at end of file + which is then transferred to the worker where all the WebGPU calls are made.

\ No newline at end of file
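The worker sample's handoff compresses well into a sketch. This is condensed from the sample source embedded in the chunk diffs above, in its post-change form: the worker now logs initialization errors with `console.error` directly, and the `'log'` message relay to the page is gone.

```ts
// main.ts — hand the canvas off to the worker (condensed from the sample).
function startWorker(canvas: HTMLCanvasElement): Worker {
  const worker = new Worker(new URL('./worker.ts', import.meta.url));
  const offscreen = canvas.transferControlToOffscreen();
  offscreen.width = canvas.clientWidth * window.devicePixelRatio;
  offscreen.height = canvas.clientHeight * window.devicePixelRatio;
  // Listing `offscreen` in the transfer array moves ownership to the worker;
  // this thread can no longer draw to it (hence the sample's try/catch
  // around React calling init twice with the same canvas).
  worker.postMessage({ type: 'init', offscreenCanvas: offscreen }, [offscreen]);
  return worker;
}

// worker.ts — all WebGPU calls happen on the worker thread.
self.addEventListener('message', async (ev: MessageEvent) => {
  if (ev.data.type !== 'init') return;
  const canvas: OffscreenCanvas = ev.data.offscreenCanvas;
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) throw new Error('WebGPU not supported in this worker');
  const device = await adapter.requestDevice();
  const context = canvas.getContext('webgpu') as GPUCanvasContext;
  context.configure({
    device,
    format: navigator.gpu.getPreferredCanvasFormat(),
    alphaMode: 'premultiplied',
  });
  // ...build the pipeline and run a requestAnimationFrame loop, as in the
  // rotatingCube sample; rAF is available in dedicated workers and returns
  // control to the browser so further messages keep being handled.
});
```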