Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Speed up shader/execution/expression tests with async pipelines #3125

Merged
merged 4 commits into the base branch from the head branch
Nov 2, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 25 additions & 17 deletions src/webgpu/shader/execution/expression/expression.ts
Original file line number Diff line number Diff line change
Expand Up @@ -353,6 +353,22 @@ export async function run(
}
};

const processBatch = async (batchCases: CaseList) => {
  // submitBatch compiles/submits the batch asynchronously and resolves to a
  // closure that validates the batch's results.
  const verifyResults = await submitBatch(
    t, shaderBuilder, parameterTypes, resultType,
    batchCases, cfg.inputSource, pipelineCache
  );
  verifyResults();
  // Fire-and-forget: once the queue has finished this batch's work, notify
  // the in-flight batch tracker so another batch may be submitted.
  void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
};

const pendingBatches = [];

for (let i = 0; i < cases.length; i += casesPerBatch) {
const batchCases = cases.slice(i, Math.min(i + casesPerBatch, cases.length));

Expand All @@ -365,18 +381,10 @@ export async function run(
}
batchesInFlight += 1;

const checkBatch = submitBatch(
t,
shaderBuilder,
parameterTypes,
resultType,
batchCases,
cfg.inputSource,
pipelineCache
);
checkBatch();
void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
pendingBatches.push(processBatch(batchCases));
}

await Promise.all(pendingBatches);
}

/**
Expand All @@ -391,23 +399,23 @@ export async function run(
* @param pipelineCache the cache of compute pipelines, shared between batches
* @returns a function that checks the results are as expected
*/
function submitBatch(
async function submitBatch(
t: GPUTest,
shaderBuilder: ShaderBuilder,
parameterTypes: Array<Type>,
resultType: Type,
cases: CaseList,
inputSource: InputSource,
pipelineCache: PipelineCache
): () => void {
): Promise<() => void> {
// Construct a buffer to hold the results of the expression tests
const outputBufferSize = cases.length * valueStride(resultType);
const outputBuffer = t.device.createBuffer({
size: outputBufferSize,
usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
});

const [pipeline, group] = buildPipeline(
const [pipeline, group] = await buildPipeline(
t,
shaderBuilder,
parameterTypes,
Expand Down Expand Up @@ -1003,7 +1011,7 @@ ${body}
* @param outputBuffer the buffer that will hold the output values of the tests
* @param pipelineCache the cache of compute pipelines, shared between batches
*/
function buildPipeline(
async function buildPipeline(
t: GPUTest,
shaderBuilder: ShaderBuilder,
parameterTypes: Array<Type>,
Expand All @@ -1012,7 +1020,7 @@ function buildPipeline(
inputSource: InputSource,
outputBuffer: GPUBuffer,
pipelineCache: PipelineCache
): [GPUComputePipeline, GPUBindGroup] {
): Promise<[GPUComputePipeline, GPUBindGroup]> {
cases.forEach(c => {
const inputTypes = c.input instanceof Array ? c.input.map(i => i.type) : [c.input.type];
if (!objectEquals(inputTypes, parameterTypes)) {
Expand All @@ -1032,7 +1040,7 @@ function buildPipeline(
const module = t.device.createShaderModule({ code: source });

// build the pipeline
const pipeline = t.device.createComputePipeline({
const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: { module, entryPoint: 'main' },
});
Expand Down
12 changes: 6 additions & 6 deletions src/webgpu/shader/execution/robust_access.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,14 +23,14 @@ const kMinI32 = -0x8000_0000;
* Non-test bindings are in bind group 1, including:
* - `constants.zero`: a dynamically-uniform `0u` value.
*/
function runShaderTest(
async function runShaderTest(
t: GPUTest,
stage: GPUShaderStageFlags,
testSource: string,
layout: GPUPipelineLayout,
testBindings: GPUBindGroupEntry[],
dynamicOffsets?: number[]
): void {
): Promise<void> {
assert(stage === GPUShaderStage.COMPUTE, 'Only know how to deal with compute for now');

// Contains just zero (for now).
Expand Down Expand Up @@ -62,7 +62,7 @@ fn main() {

t.debug(source);
const module = t.device.createShaderModule({ code: source });
const pipeline = t.device.createComputePipeline({
const pipeline = await t.device.createComputePipelineAsync({
layout,
compute: { module, entryPoint: 'main' },
});
Expand Down Expand Up @@ -172,7 +172,7 @@ g.test('linear_memory')
.expand('baseType', supportedScalarTypes)
.expandWithParams(generateTypes)
)
.fn(t => {
.fn(async t => {
const {
addressSpace,
storageMode,
Expand Down Expand Up @@ -448,7 +448,7 @@ fn runTest() -> u32 {
);

// Run the shader, accessing the buffer.
runShaderTest(
await runShaderTest(
t,
GPUShaderStage.COMPUTE,
testSource,
Expand All @@ -475,6 +475,6 @@ fn runTest() -> u32 {
bufferBindingEnd
);
} else {
runShaderTest(t, GPUShaderStage.COMPUTE, testSource, layout, []);
await runShaderTest(t, GPUShaderStage.COMPUTE, testSource, layout, []);
}
});
6 changes: 3 additions & 3 deletions src/webgpu/shader/execution/zero_init.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ g.test('compute,zero_init')
})
)
.batch(15)
.fn(t => {
.fn(async t => {
const { workgroupSize } = t.params;
const { maxComputeInvocationsPerWorkgroup } = t.device.limits;
const numWorkgroupInvocations = workgroupSize.reduce((a, b) => a * b);
Expand Down Expand Up @@ -446,7 +446,7 @@ g.test('compute,zero_init')
],
});

const fillPipeline = t.device.createComputePipeline({
const fillPipeline = await t.device.createComputePipelineAsync({
layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
label: 'Workgroup Fill Pipeline',
compute: {
Expand Down Expand Up @@ -495,7 +495,7 @@ g.test('compute,zero_init')
t.queue.submit([e.finish()]);
}

const pipeline = t.device.createComputePipeline({
const pipeline = await t.device.createComputePipelineAsync({
layout: 'auto',
compute: {
module: t.device.createShaderModule({
Expand Down