Bitonic Sort Update 2 #318

Merged: 29 commits, Dec 4, 2023
Commits (29)
d51dc45
Merge pull request #1 from webgpu/main
cmhhelgeson Oct 18, 2023
32ec9b1
Changed default values and added auto sort at start
cmhhelgeson Oct 26, 2023
acdc040
Added stepIndex and totalStep to execution Information and allowed fo…
cmhhelgeson Oct 26, 2023
33bce15
Scaffolding for global sort and renaming of controller elements from …
cmhhelgeson Oct 27, 2023
4b65446
removed anys for greggman
cmhhelgeson Oct 27, 2023
766427d
Merge pull request #2 from webgpu/main
cmhhelgeson Oct 27, 2023
ebf1684
Safety commit
cmhhelgeson Oct 30, 2023
181164c
Adjusted bindGroups and uniform placement and added different visuali…
cmhhelgeson Oct 30, 2023
fc40521
Adjusted bindGroups, adjusted uniforms, and added a new visualization…
cmhhelgeson Oct 30, 2023
50a1bae
Finished adding updates
cmhhelgeson Oct 30, 2023
174db18
Removed pointerEvents from non-interactive gui elements and added ato…
cmhhelgeson Oct 31, 2023
1a4a0d1
Made totalSwaps non-interactive :(
cmhhelgeson Oct 31, 2023
415bd67
Merge pull request #3 from webgpu/main
cmhhelgeson Oct 31, 2023
bd83e37
Sketched out new branch
cmhhelgeson Oct 31, 2023
3751527
Sketch
cmhhelgeson Nov 1, 2023
bb85b16
Condense information in multiple Gui elements down to single Gui elem…
cmhhelgeson Nov 2, 2023
c63125c
Added thread constraint
cmhhelgeson Nov 2, 2023
354aeeb
Merge branch 'bitonic_sort_branch' of https://github.com/cmhhelgeson/…
cmhhelgeson Nov 2, 2023
706e090
Added ability to constrain maximum number of threads/invocations per …
cmhhelgeson Nov 2, 2023
80596b3
Get rid of todos
cmhhelgeson Nov 2, 2023
e240142
Remove unused folder
cmhhelgeson Nov 2, 2023
e0df2ea
Removed unintended addition to slug file
cmhhelgeson Nov 2, 2023
63d98ee
removed references to threads in main.ts, removed references to remov…
cmhhelgeson Nov 30, 2023
32db6c9
Removed all references to threads in bitonicCompute.ts
cmhhelgeson Nov 30, 2023
927ae5d
Added new descriptions for each of the settings elements
cmhhelgeson Dec 2, 2023
10d94cb
Implemented most ben-clayton naming suggestions
cmhhelgeson Dec 2, 2023
0e4b4f9
Fixed minor spacing issue to make prettier happy
cmhhelgeson Dec 2, 2023
04360b6
Added additional comments
cmhhelgeson Dec 4, 2023
ba7bba1
Final changes
cmhhelgeson Dec 4, 2023
Changes from 24 commits: bitonicCompute.ts
@@ -1,8 +1,8 @@
 export const computeArgKeys = ['width', 'height', 'algo', 'blockHeight'];

-export const NaiveBitonicCompute = (threadsPerWorkgroup: number) => {
-  if (threadsPerWorkgroup % 2 !== 0 || threadsPerWorkgroup > 256) {
-    threadsPerWorkgroup = 256;
+export const NaiveBitonicCompute = (invocationsPerWorkgroup: number) => {
+  if (invocationsPerWorkgroup % 2 !== 0 || invocationsPerWorkgroup > 256) {
+    invocationsPerWorkgroup = 256;
   }
   // Ensure that workgroupSize is half the number of elements
   return `
@@ -15,7 +15,7 @@ struct Uniforms {
 }

 // Create local workgroup data that can contain all elements
-var<workgroup> local_data: array<u32, ${threadsPerWorkgroup * 2}>;
+var<workgroup> local_data: array<u32, ${invocationsPerWorkgroup * 2}>;

 // Define groups (functions refer to this data)
 @group(0) @binding(0) var<storage, read> input_data: array<u32>;
@@ -35,25 +35,25 @@ fn local_compare_and_swap(idx_before: u32, idx_after: u32) {
   return;
 }

-// thread_id goes from 0 to threadsPerWorkgroup
-fn get_flip_indices(thread_id: u32, block_height: u32) -> vec2<u32> {
+// invoke_id goes from 0 to invocationsPerWorkgroup
+fn get_flip_indices(invoke_id: u32, block_height: u32) -> vec2<u32> {
   // Calculate index offset (i.e. move indices into correct block)
-  let block_offset: u32 = ((2 * thread_id) / block_height) * block_height;
+  let block_offset: u32 = ((2 * invoke_id) / block_height) * block_height;
   let half_height = block_height / 2;
   // Calculate index spacing
   var idx: vec2<u32> = vec2<u32>(
-    thread_id % half_height, block_height - (thread_id % half_height) - 1,
+    invoke_id % half_height, block_height - (invoke_id % half_height) - 1,
   );
   idx.x += block_offset;
   idx.y += block_offset;
   return idx;
 }

-fn get_disperse_indices(thread_id: u32, block_height: u32) -> vec2<u32> {
-  var block_offset: u32 = ((2 * thread_id) / block_height) * block_height;
+fn get_disperse_indices(invoke_id: u32, block_height: u32) -> vec2<u32> {
+  var block_offset: u32 = ((2 * invoke_id) / block_height) * block_height;
   let half_height = block_height / 2;
   var idx: vec2<u32> = vec2<u32>(
-    thread_id % half_height, (thread_id % half_height) + half_height
+    invoke_id % half_height, (invoke_id % half_height) + half_height
   );
   idx.x += block_offset;
   idx.y += block_offset;
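
A note on the index math in get_flip_indices and get_disperse_indices above: the following is a minimal CPU-side TypeScript sketch that mirrors the same arithmetic, useful for checking pairs by hand. It is illustrative only and not part of this PR; the helper names getFlipIndices and getDisperseIndices are hypothetical.

// CPU-side mirror of the WGSL index math above (illustration only, not in this PR).
// u32 division in WGSL truncates; Math.floor matches it for non-negative values.

// Flip step: each invocation pairs an element with its mirror inside a block.
function getFlipIndices(invokeId: number, blockHeight: number): [number, number] {
  const blockOffset = Math.floor((2 * invokeId) / blockHeight) * blockHeight;
  const halfHeight = blockHeight / 2;
  return [
    (invokeId % halfHeight) + blockOffset,
    blockHeight - (invokeId % halfHeight) - 1 + blockOffset,
  ];
}

// Disperse step: each invocation pairs an element with the one half a block below it.
function getDisperseIndices(invokeId: number, blockHeight: number): [number, number] {
  const blockOffset = Math.floor((2 * invokeId) / blockHeight) * blockHeight;
  const halfHeight = blockHeight / 2;
  return [
    (invokeId % halfHeight) + blockOffset,
    (invokeId % halfHeight) + halfHeight + blockOffset,
  ];
}

// block_height = 4: invocation 0 flips (0,3) and disperses (0,2); invocation 1 flips (1,2) and disperses (1,3).
console.log(getFlipIndices(0, 4), getFlipIndices(1, 4)); // [0, 3] [1, 2]
console.log(getDisperseIndices(0, 4), getDisperseIndices(1, 4)); // [0, 2] [1, 3]
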
@@ -73,20 +73,20 @@ const ALGO_LOCAL_FLIP = 1;
 const ALGO_LOCAL_DISPERSE = 2;
 const ALGO_GLOBAL_FLIP = 3;

-// Our compute shader will execute specified # of threads or elements / 2 threads
-@compute @workgroup_size(${threadsPerWorkgroup}, 1, 1)
+// Our compute shader will execute specified # of invocations or elements / 2 invocations
+@compute @workgroup_size(${invocationsPerWorkgroup}, 1, 1)
 fn computeMain(
   @builtin(global_invocation_id) global_id: vec3<u32>,
   @builtin(local_invocation_id) local_id: vec3<u32>,
   @builtin(workgroup_id) workgroup_id: vec3<u32>,
 ) {

-  let offset = ${threadsPerWorkgroup} * 2 * workgroup_id.x;
+  let offset = ${invocationsPerWorkgroup} * 2 * workgroup_id.x;
   // If we will perform a local swap, then populate the local data
   if (uniforms.algo <= 2) {
     // Assign range of input_data to local_data.
     // Range cannot exceed maxWorkgroupsX * 2
-    // Each thread will populate the workgroup data... (1 thread for every 2 elements)
+    // Each invocation will populate the workgroup data... (1 invocation for every 2 elements)
     local_data[local_id.x * 2] = input_data[offset + local_id.x * 2];
     local_data[local_id.x * 2 + 1] = input_data[offset + local_id.x * 2 + 1];
   }
@@ -116,7 +116,7 @@ fn computeMain(
     }
   }

-  // Ensure that all threads have swapped their own regions of data
+  // Ensure that all invocations have swapped their own regions of data
   workgroupBarrier();

   if (uniforms.algo <= ALGO_LOCAL_DISPERSE) {
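
For context on how the renamed NaiveBitonicCompute factory is typically consumed, here is a minimal WebGPU pipeline sketch. It assumes a GPUDevice called device is already available, omits bind group and buffer setup, and only illustrates standard WebGPU API calls; it is not code taken from this PR's main.ts, and the import path and element count are assumptions.

// Minimal sketch: compile the generated WGSL and dispatch it (illustration only).
import { NaiveBitonicCompute } from './bitonicCompute'; // path assumed for illustration

declare const device: GPUDevice; // assumed to have been acquired via adapter.requestDevice()

const invocationsPerWorkgroup = 256; // must be even and <= 256, per the guard in NaiveBitonicCompute

const module = device.createShaderModule({
  code: NaiveBitonicCompute(invocationsPerWorkgroup),
});

const pipeline = device.createComputePipeline({
  layout: 'auto',
  compute: { module, entryPoint: 'computeMain' },
});

// Each workgroup sorts invocationsPerWorkgroup * 2 elements of local_data,
// so one workgroup is needed per (invocationsPerWorkgroup * 2) elements.
const totalElements = 8192; // hypothetical element count
const workgroupCount = Math.ceil(totalElements / (invocationsPerWorkgroup * 2));

const encoder = device.createCommandEncoder();
const pass = encoder.beginComputePass();
pass.setPipeline(pipeline);
// pass.setBindGroup(0, bindGroup); // bind group creation omitted for brevity
pass.dispatchWorkgroups(workgroupCount);
pass.end();
device.queue.submit([encoder.finish()]);
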