diff --git a/src/webgpu/shader/execution/expression/expression.ts b/src/webgpu/shader/execution/expression/expression.ts
index fb0b417930c2..f85516f29bdd 100644
--- a/src/webgpu/shader/execution/expression/expression.ts
+++ b/src/webgpu/shader/execution/expression/expression.ts
@@ -353,6 +353,22 @@ export async function run(
     }
   };
 
+  const processBatch = async (batchCases: CaseList) => {
+    const checkBatch = await submitBatch(
+      t,
+      shaderBuilder,
+      parameterTypes,
+      resultType,
+      batchCases,
+      cfg.inputSource,
+      pipelineCache
+    );
+    checkBatch();
+    void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
+  };
+
+  const pendingBatches = [];
+
   for (let i = 0; i < cases.length; i += casesPerBatch) {
     const batchCases = cases.slice(i, Math.min(i + casesPerBatch, cases.length));
 
@@ -365,18 +381,10 @@ export async function run(
     }
 
     batchesInFlight += 1;
-    const checkBatch = submitBatch(
-      t,
-      shaderBuilder,
-      parameterTypes,
-      resultType,
-      batchCases,
-      cfg.inputSource,
-      pipelineCache
-    );
-    checkBatch();
-    void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
+    pendingBatches.push(processBatch(batchCases));
   }
+
+  await Promise.all(pendingBatches);
 }
 
 /**
@@ -391,7 +399,7 @@ export async function run(
  * @param pipelineCache the cache of compute pipelines, shared between batches
  * @returns a function that checks the results are as expected
  */
-function submitBatch(
+async function submitBatch(
   t: GPUTest,
   shaderBuilder: ShaderBuilder,
   parameterTypes: Array<Type>,
@@ -399,7 +407,7 @@ function submitBatch(
   cases: CaseList,
   inputSource: InputSource,
   pipelineCache: PipelineCache
-): () => void {
+): Promise<() => void> {
   // Construct a buffer to hold the results of the expression tests
   const outputBufferSize = cases.length * valueStride(resultType);
   const outputBuffer = t.device.createBuffer({
     size: outputBufferSize,
     usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
   });
 
-  const [pipeline, group] = buildPipeline(
+  const [pipeline, group] = await buildPipeline(
     t,
     shaderBuilder,
     parameterTypes,
@@ -1003,7 +1011,7 @@ ${body}
  * @param outputBuffer the buffer that will hold the output values of the tests
  * @param pipelineCache the cache of compute pipelines, shared between batches
  */
-function buildPipeline(
+async function buildPipeline(
   t: GPUTest,
   shaderBuilder: ShaderBuilder,
   parameterTypes: Array<Type>,
@@ -1012,7 +1020,7 @@ function buildPipeline(
   cases: CaseList,
   inputSource: InputSource,
   outputBuffer: GPUBuffer,
   pipelineCache: PipelineCache
-): [GPUComputePipeline, GPUBindGroup] {
+): Promise<[GPUComputePipeline, GPUBindGroup]> {
   cases.forEach(c => {
     const inputTypes = c.input instanceof Array ? c.input.map(i => i.type) : [c.input.type];
     if (!objectEquals(inputTypes, parameterTypes)) {
@@ -1032,7 +1040,7 @@ function buildPipeline(
   const module = t.device.createShaderModule({ code: source });
 
   // build the pipeline
-  const pipeline = t.device.createComputePipeline({
+  const pipeline = await t.device.createComputePipelineAsync({
     layout: 'auto',
     compute: { module, entryPoint: 'main' },
   });
diff --git a/src/webgpu/shader/execution/robust_access.spec.ts b/src/webgpu/shader/execution/robust_access.spec.ts
index 03b58265661a..965dd283dd16 100644
--- a/src/webgpu/shader/execution/robust_access.spec.ts
+++ b/src/webgpu/shader/execution/robust_access.spec.ts
@@ -23,14 +23,14 @@ const kMinI32 = -0x8000_0000;
  * Non-test bindings are in bind group 1, including:
  * - `constants.zero`: a dynamically-uniform `0u` value.
  */
-function runShaderTest(
+async function runShaderTest(
   t: GPUTest,
   stage: GPUShaderStageFlags,
   testSource: string,
   layout: GPUPipelineLayout,
   testBindings: GPUBindGroupEntry[],
   dynamicOffsets?: number[]
-): void {
+): Promise<void> {
   assert(stage === GPUShaderStage.COMPUTE, 'Only know how to deal with compute for now');
 
   // Contains just zero (for now).
@@ -62,7 +62,7 @@ fn main() {
   t.debug(source);
   const module = t.device.createShaderModule({ code: source });
 
-  const pipeline = t.device.createComputePipeline({
+  const pipeline = await t.device.createComputePipelineAsync({
     layout,
     compute: { module, entryPoint: 'main' },
   });
@@ -172,7 +172,7 @@ g.test('linear_memory')
       .expand('baseType', supportedScalarTypes)
       .expandWithParams(generateTypes)
   )
-  .fn(t => {
+  .fn(async t => {
     const {
       addressSpace,
       storageMode,
@@ -448,7 +448,7 @@ fn runTest() -> u32 {
     );
 
     // Run the shader, accessing the buffer.
-    runShaderTest(
+    await runShaderTest(
       t,
       GPUShaderStage.COMPUTE,
       testSource,
@@ -475,6 +475,6 @@ fn runTest() -> u32 {
         bufferBindingEnd
      );
    } else {
-      runShaderTest(t, GPUShaderStage.COMPUTE, testSource, layout, []);
+      await runShaderTest(t, GPUShaderStage.COMPUTE, testSource, layout, []);
    }
  });
diff --git a/src/webgpu/shader/execution/zero_init.spec.ts b/src/webgpu/shader/execution/zero_init.spec.ts
index fe298a161a66..e03a72f8df56 100644
--- a/src/webgpu/shader/execution/zero_init.spec.ts
+++ b/src/webgpu/shader/execution/zero_init.spec.ts
@@ -227,7 +227,7 @@ g.test('compute,zero_init')
       })
   )
   .batch(15)
-  .fn(t => {
+  .fn(async t => {
     const { workgroupSize } = t.params;
     const { maxComputeInvocationsPerWorkgroup } = t.device.limits;
     const numWorkgroupInvocations = workgroupSize.reduce((a, b) => a * b);
@@ -446,7 +446,7 @@ g.test('compute,zero_init')
       ],
     });
 
-    const fillPipeline = t.device.createComputePipeline({
+    const fillPipeline = await t.device.createComputePipelineAsync({
       layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
       label: 'Workgroup Fill Pipeline',
       compute: {
@@ -495,7 +495,7 @@ g.test('compute,zero_init')
       t.queue.submit([e.finish()]);
     }
 
-    const pipeline = t.device.createComputePipeline({
+    const pipeline = await t.device.createComputePipelineAsync({
       layout: 'auto',
       compute: {
         module: t.device.createShaderModule({