From 76235ca6445c376a3e60d1e36dd711b5647cc827 Mon Sep 17 00:00:00 2001
From: Thomas Smith <68340554+b0nes164@users.noreply.github.com>
Date: Thu, 19 Sep 2024 09:01:42 -0700
Subject: [PATCH] Style fixes

---
 vello/src/render.rs                           |   4 +-
 vello/src/shaders.rs                          |   2 +-
 vello_shaders/shader/pathtag_scan_csdldf.wgsl | 108 +++++++++---------
 vello_shaders/shader/shared/pathtag.wgsl      |   2 +-
 vello_shaders/src/cpu.rs                      |   2 +-
 vello_shaders/src/cpu/pathtag_scan_single.rs  |   6 +-
 6 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/vello/src/render.rs b/vello/src/render.rs
index 6e874944..68e0270b 100644
--- a/vello/src/render.rs
+++ b/vello/src/render.rs
@@ -200,11 +200,11 @@ impl Render {
         );
         let reduced_buf = BufferProxy::new(
             buffer_sizes.path_reduced.size_in_bytes().into(),
-            "reduced_buf",
+            "vello.reduced_buf",
         );
         let path_scan_bump_buf = BufferProxy::new(
             buffer_sizes.path_scan_bump.size_in_bytes().into(),
-            "path_scan_bump_buf",
+            "vello.path_scan_bump_buf",
         );
         recording.clear_all(path_scan_bump_buf);
         recording.clear_all(reduced_buf);
diff --git a/vello/src/shaders.rs b/vello/src/shaders.rs
index 24192894..7b0ff1ee 100644
--- a/vello/src/shaders.rs
+++ b/vello/src/shaders.rs
@@ -97,7 +97,7 @@ pub(crate) fn full_shaders(
     let pathtag_scan_csdldf = add_shader!(
         pathtag_scan_csdldf,
         [Uniform, BufReadOnly, Buffer, Buffer, Buffer],
-        CpuShaderType::Present(vello_shaders::cpu::pathtag_scan_single)
+        CpuShaderType::Present(vello_shaders::cpu::pathtag_scan)
     );
 
     let bbox_clear = add_shader!(bbox_clear, [Uniform, Buffer]);
diff --git a/vello_shaders/shader/pathtag_scan_csdldf.wgsl b/vello_shaders/shader/pathtag_scan_csdldf.wgsl
index f50ea503..0ca8ba7b 100644
--- a/vello_shaders/shader/pathtag_scan_csdldf.wgsl
+++ b/vello_shaders/shader/pathtag_scan_csdldf.wgsl
@@ -17,7 +17,7 @@ var<storage, read_write> reduced: array<array<atomic<u32>, 5>>;
 var<storage, read_write> tag_monoids: array<array<u32, 5>>;
 
 @group(0) @binding(4)
-var<storage, read_write> scan_bump: array<atomic<u32>>;
+var<storage, read_write> scan_bump: atomic<u32>;
 
 //Workgroup info
 let LG_WG_SIZE = 8u;
@@ -53,11 +53,11 @@ fn attempt_lookback(
 ){
     let payload: u32 = atomicLoad(&reduced[lookback_ix][member_ix]);
     let flag_value: u32 = payload & FLAG_MASK;
-    if(flag_value == FLAG_REDUCTION){
+    if flag_value == FLAG_REDUCTION {
         *spin_count = 0u;
         *prev += payload >> 2u;
         *reduction_complete = true;
-    } else if (flag_value == FLAG_INCLUSIVE){
+    } else if flag_value == FLAG_INCLUSIVE {
         *spin_count = 0u;
         *prev += payload >> 2u;
         atomicStore(&reduced[part_ix][member_ix], ((aggregate + *prev) << 2u) | FLAG_INCLUSIVE);
@@ -77,12 +77,12 @@ fn fallback(
 ){
     let fallback_payload = (fallback_aggregate << 2u) | select(FLAG_INCLUSIVE, FLAG_REDUCTION, fallback_ix != 0u);
     let prev_payload = atomicMax(&reduced[fallback_ix][member_ix], fallback_payload);
-    if(prev_payload == 0u){
+    if prev_payload == 0u {
         *prev += fallback_aggregate;
     } else {
         *prev += prev_payload >> 2u;
     }
-    if(fallback_ix == 0u || (prev_payload & FLAG_MASK) == FLAG_INCLUSIVE){
+    if fallback_ix == 0u || (prev_payload & FLAG_MASK) == FLAG_INCLUSIVE {
         atomicStore(&reduced[part_ix][member_ix], ((aggregate + *prev) << 2u) | FLAG_INCLUSIVE);
         sh_tag_broadcast[member_ix] = *prev;
         *inclusive_complete = true;
@@ -94,8 +94,8 @@ fn main(
     @builtin(local_invocation_id) local_id: vec3<u32>,
 ) {
     //acquire the partition index, set the lock
-    if(local_id.x == 0u){
-        sh_broadcast = atomicAdd(&scan_bump[0u], 1u);
+    if local_id.x == 0u {
+        sh_broadcast = atomicAdd(&scan_bump, 1u);
         sh_lock = LOCKED;
     }
     workgroupBarrier();
@@ -121,7 +121,7 @@ fn main(
 
     //Broadcast the results and flag into device memory
     if local_id.x == WG_SIZE - 1u {
-        if(part_ix != 0u){
+        if part_ix != 0u {
             atomicStore(&reduced[part_ix][0], (agg[0] << 2u) | FLAG_REDUCTION);
             atomicStore(&reduced[part_ix][1], (agg[1] << 2u) | FLAG_REDUCTION);
             atomicStore(&reduced[part_ix][2], (agg[2] << 2u) | FLAG_REDUCTION);
@@ -137,7 +137,7 @@ fn main(
     }
 
     //Lookback and potentially fallback
-    if(part_ix != 0u){
+    if part_ix != 0u {
         var lookback_ix = part_ix - 1u;
 
         var inc0: bool = false;
@@ -163,15 +163,15 @@ fn main(
 
         //Lookback, with a single thread
         //Last thread in the workgroup has the complete aggregate
-        if(local_id.x == WG_SIZE - 1u){
-            for(var spin_count: u32 = 0u; spin_count < MAX_SPIN_COUNT; ){
+        if local_id.x == WG_SIZE - 1u {
+            for (var spin_count: u32 = 0u; spin_count < MAX_SPIN_COUNT; ) {
                 //TRANS_IX
-                if(!inc0 && !red0){
+                if !inc0 && !red0 {
                     attempt_lookback(
                         part_ix,
                         lookback_ix,
                         0u,
-                        agg[0u],
+                        agg[0],
                         &spin_count,
                         &prev0,
                         &red0,
@@ -179,12 +179,12 @@ fn main(
                 }

                 //PATHSEG_IX
-                if(!inc1 && !red1){
+                if !inc1 && !red1 {
                     attempt_lookback(
                         part_ix,
                         lookback_ix,
                         1u,
-                        agg[1u],
+                        agg[1],
                         &spin_count,
                         &prev1,
                         &red1,
@@ -192,12 +192,12 @@ fn main(
                 }

                 //PATHSEG_OFFSET
-                if(!inc2 && !red2){
+                if !inc2 && !red2 {
                     attempt_lookback(
                         part_ix,
                         lookback_ix,
                         2u,
-                        agg[2u],
+                        agg[2],
                         &spin_count,
                         &prev2,
                         &red2,
@@ -205,12 +205,12 @@ fn main(
                 }

                 //STYLE_IX
-                if(!inc3 && !red3){
+                if !inc3 && !red3 {
                     attempt_lookback(
                         part_ix,
                         lookback_ix,
                         3u,
-                        agg[3u],
+                        agg[3],
                         &spin_count,
                         &prev3,
                         &red3,
@@ -218,12 +218,12 @@ fn main(
                 }

                 //PATH_IX
-                if(!inc4 && !red4){
+                if !inc4 && !red4 {
                     attempt_lookback(
                         part_ix,
                         lookback_ix,
                         4u,
-                        agg[4u],
+                        agg[4],
                         &spin_count,
                         &prev4,
                         &red4,
@@ -231,8 +231,8 @@ fn main(
                 }

                 //Have we completed the current reduction or inclusive sum for all PathTag members?
-                if((inc0 || red0) && (inc1 || red1) && (inc2 || red2) && (inc3 || red3) && (inc4 || red4)){
-                    if(inc0 && inc1 && inc2 && inc3 && inc4){
+                if (inc0 || red0) && (inc1 || red1) && (inc2 || red2) && (inc3 || red3) && (inc4 || red4) {
+                    if inc0 && inc1 && inc2 && inc3 && inc4 {
                         sh_lock = UNLOCKED;
                         break;
                     } else {
@@ -251,7 +251,7 @@ fn main(
             //If we didn't complete the lookback within the allotted spins,
             //prepare for the fallback by broadcasting the lookback tile id
             //and states of the tagmonoid struct members
-            if(sh_lock == LOCKED){
+            if sh_lock == LOCKED {
                 sh_broadcast = lookback_ix;
                 sh_fallback_state[0] = !inc0 && !red0;
                 sh_fallback_state[1] = !inc1 && !red1;
@@ -263,7 +263,7 @@ fn main(
         workgroupBarrier();

        //Fallback
-        if(sh_lock == LOCKED){
+        if sh_lock == LOCKED {
            let fallback_ix = sh_broadcast;

            red0 = sh_fallback_state[0];
@@ -282,106 +282,106 @@ fn main(
             workgroupBarrier();
             if local_id.x + (1u << i) < WG_SIZE {
                 let index = local_id.x + (1u << i);
-                if(red0){
+                if red0 {
                     f_agg[0] += sh_fallback[index][0];
                 }

-                if(red1){
+                if red1 {
                     f_agg[1] += sh_fallback[index][1];
                 }

-                if(red2){
+                if red2 {
                     f_agg[2] += sh_fallback[index][2];
                 }

-                if(red3){
+                if red3 {
                     f_agg[3] += sh_fallback[index][3];
                 }

-                if(red4){
+                if red4 {
                     f_agg[4] += sh_fallback[index][4];
                 }
             }
             workgroupBarrier();
-            if(red0){
+            if red0 {
                 sh_fallback[local_id.x][0] = f_agg[0];
             }
-            if(red1){
+            if red1 {
                 sh_fallback[local_id.x][1] = f_agg[1];
             }
-            if(red2){
+            if red2 {
                 sh_fallback[local_id.x][2] = f_agg[2];
             }
-            if(red3){
+            if red3 {
                 sh_fallback[local_id.x][3] = f_agg[3];
             }
-            if(red4){
+            if red4 {
                 sh_fallback[local_id.x][4] = f_agg[4];
             }
         }

         //Fallback and attempt insertion of status flag
-        if(local_id.x == WG_SIZE - 1u){
+        if local_id.x == WG_SIZE - 1u {
             //TRANS_IX FALLBACK
-            if(red0){
+            if red0 {
                 fallback(
                     part_ix,
                     fallback_ix,
                     0u,
-                    agg[0u],
-                    f_agg[0u],
+                    agg[0],
+                    f_agg[0],
                     &prev0,
                     &inc0,
                 );
             }

             //PATHSEG_IX FALLBACK
-            if(red1){
+            if red1 {
                 fallback(
                     part_ix,
                     fallback_ix,
                     1u,
-                    agg[1u],
-                    f_agg[1u],
+                    agg[1],
+                    f_agg[1],
                     &prev1,
                     &inc1,
                 );
             }

             //PATHSEG_OFFSET FALLBACK
-            if(red2){
+            if red2 {
                 fallback(
                     part_ix,
                     fallback_ix,
                     2u,
-                    agg[2u],
-                    f_agg[2u],
+                    agg[2],
+                    f_agg[2],
                     &prev2,
                     &inc2,
                 );
             }

             //STYLE_IX FALLBACK
-            if(red3){
+            if red3 {
                 fallback(
                     part_ix,
                     fallback_ix,
                     3u,
-                    agg[3u],
-                    f_agg[3u],
+                    agg[3],
+                    f_agg[3],
                     &prev3,
                     &inc3,
                 );
             }

             //PATH_IX FALLBACK
-            if(red4){
+            if red4 {
                 fallback(
                     part_ix,
                     fallback_ix,
                     4u,
-                    agg[4u],
-                    f_agg[4u],
+                    agg[4],
+                    f_agg[4],
                     &prev4,
                     &inc4,
                 );
@@ -389,7 +389,7 @@ fn main(

             //At this point, the reductions are guaranteed to be complete,
             //so try unlocking, else, keep looking back
-            if(inc0 && inc1 && inc2 && inc3 && inc4){
+            if inc0 && inc1 && inc2 && inc3 && inc4 {
                 sh_lock = UNLOCKED;
             } else {
                 lookback_ix--;
@@ -402,7 +402,7 @@ fn main(
     workgroupBarrier();

     var tm: array<u32, 5>;
-    if(part_ix != 0u){
+    if part_ix != 0u {
         tm = sh_tag_broadcast;
     } else {
         tm[0] = 0u;
@@ -412,7 +412,7 @@ fn main(
         tm[4] = 0u;
     }

-    if(local_id.x != 0u){
+    if local_id.x != 0u {
         let other: array<u32, 5> = sh_scratch[local_id.x - 1u];
         tm[0] += other[0];
         tm[1] += other[1];
diff --git a/vello_shaders/shader/shared/pathtag.wgsl b/vello_shaders/shader/shared/pathtag.wgsl
index b62e7cef..aa4dc93a 100644
--- a/vello_shaders/shader/shared/pathtag.wgsl
+++ b/vello_shaders/shader/shared/pathtag.wgsl
@@ -84,4 +84,4 @@ fn reduce_tag_arr(tag_word: u32) -> array<u32, 5> {
     c[4] = countOneBits(tag_word & (PATH_TAG_PATH * 0x1010101u));
     c[3] = countOneBits(tag_word & (PATH_TAG_STYLE * 0x1010101u)) * STYLE_SIZE_IN_WORDS;
     return c;
-}
\ No newline at end of file
+}
diff --git a/vello_shaders/src/cpu.rs b/vello_shaders/src/cpu.rs
index 2aa8d677..5df1a52c 100644
--- a/vello_shaders/src/cpu.rs
+++ b/vello_shaders/src/cpu.rs
@@ -42,7 +42,7 @@ pub use path_count::path_count;
 pub use path_count_setup::path_count_setup;
 pub use path_tiling::path_tiling;
 pub use path_tiling_setup::path_tiling_setup;
-pub use pathtag_scan_single::pathtag_scan_single;
+pub use pathtag_scan_single::pathtag_scan;
 pub use tile_alloc::tile_alloc;

 use std::cell::{Ref, RefCell, RefMut};
diff --git a/vello_shaders/src/cpu/pathtag_scan_single.rs b/vello_shaders/src/cpu/pathtag_scan_single.rs
index 53858509..79c1b3a2 100644
--- a/vello_shaders/src/cpu/pathtag_scan_single.rs
+++ b/vello_shaders/src/cpu/pathtag_scan_single.rs
@@ -7,7 +7,7 @@ use super::CpuBinding;

 const WG_SIZE: usize = 256;

-fn pathtag_scan_single_main(
+fn pathtag_scan_main(
     n_wg: u32,
     config: &ConfigUniform,
     scene: &[u32],
@@ -22,9 +22,9 @@ fn pathtag_scan_single_main(
     }
 }

-pub fn pathtag_scan_single(n_wg: u32, resources: &[CpuBinding]) {
+pub fn pathtag_scan(n_wg: u32, resources: &[CpuBinding]) {
     let config = resources[0].as_typed();
     let scene = resources[1].as_slice();
     let mut tag_monoids = resources[3].as_slice_mut();
-    pathtag_scan_single_main(n_wg, &config, &scene, &mut tag_monoids);
+    pathtag_scan_main(n_wg, &config, &scene, &mut tag_monoids);
 }