diff --git a/build/src/build.bqn b/build/src/build.bqn
index f3db89ba..052abf32 100755
--- a/build/src/build.bqn
+++ b/build/src/build.bqn
@@ -650,8 +650,8 @@ cachedBin‿linkerCache ← {
     "xag"‿"src/builtins/search.c"‿"search", "xa."‿"src/builtins/fold.c"‿"fold", "xag"‿"src/builtins/sort.c"‿"bins"
-    "2.."‿"src/builtins/select.c"‿"select", "2.."‿"src/builtins/scan.c"‿"scan",
-    "2a."‿"src/builtins/slash.c"‿"constrep", "2.."‿"src/builtins/scan.c"‿"neq",
+    "2.."‿"src/builtins/select.c"‿"select", "xag"‿"src/builtins/scan.c"‿"scan",
+    "2a."‿"src/builtins/slash.c"‿"constrep", "xag"‿"src/builtins/slash.c"‿"slash",
     "2.."‿"src/builtins/slash.c"‿"count"
   ⟩
   objs ← ⟨⟩
diff --git a/src/builtins/grade.h b/src/builtins/grade.h
index fe0b5cd3..708edf14 100644
--- a/src/builtins/grade.h
+++ b/src/builtins/grade.h
@@ -78,22 +78,22 @@
 }
 #if SINGELI_AVX2
-extern void (*const avx2_scan_max_i8)(int8_t* v0,int8_t* v1,uint64_t v2);
-extern void (*const avx2_scan_min_i8)(int8_t* v0,int8_t* v1,uint64_t v2);
-extern void (*const avx2_scan_max_i16)(int16_t* v0,int16_t* v1,uint64_t v2);
-extern void (*const avx2_scan_min_i16)(int16_t* v0,int16_t* v1,uint64_t v2);
+extern void (*const si_scan_max_i8)(int8_t* v0,int8_t* v1,uint64_t v2);
+extern void (*const si_scan_min_i8)(int8_t* v0,int8_t* v1,uint64_t v2);
+extern void (*const si_scan_max_i16)(int16_t* v0,int16_t* v1,uint64_t v2);
+extern void (*const si_scan_min_i16)(int16_t* v0,int16_t* v1,uint64_t v2);
 #define COUNT_THRESHOLD 32
 #define WRITE_SPARSE_i8 \
   for (usz i=0; i>6] = vg_def_u64(rp[ia>>6]);
 #endif
diff --git a/src/builtins/scan.c b/src/builtins/scan.c
--- a/src/builtins/scan.c
+++ b/src/builtins/scan.c
@@ -78,8 +74,8 @@ B scan_add_bool(B x, u64 ia) { // consumes x
   } else {
     void* rp = m_tyarrv(&r, elWidth(re), ia, el2t(re));
 #define SUM_BITWISE(T) { T c=0; for (usz i=0; i> n)
diff --git a/src/singeli/src/bins.singeli b/src/singeli/src/bins.singeli
--- a/src/singeli/src/bins.singeli
+++ b/src/singeli/src/bins.singeli
 # Forward or backwards in-place max-scan
 # Assumes a whole number of vectors and minimum 0
+include './scan_common'
 fn max_scan{T, up}(x:*T, len:u64) : void = {
-  def w = width{T}
-  if (hasarch{'AVX2'} and T!=u64) {
+  if (hasarch{'X86_64'}) {
     def op = max
-    # TODO unify with scan.singeli avx2_scan_idem
-    def rev{a} = if (up) a else (tuplen{a}-1)-reverse{a}
-    def maker{T, l} = make{T, rev{l}}
-    def sel8{v, t} = sel{[16]u8, v, maker{[32]i8, t}}
-    def sel8{v, t & istup{t} & tuplen{t}==16} = sel8{v, merge{t,t}}
-    def shuf{T, v, n & istup{n}} = shuf{T, v, base{4,rev{n}}}
-    def spread{a:VT} = {
-      def w = elwidth{VT}
-      def b = w/8
-      if (w<=16) sel8{a,merge{iota{12},(16-b)+iota{4}%b}}; else a
-    }
-    def shift{k,l} = merge{iota{k},iota{l-k}}
-    def c8 {k, a} = op{a, shuf{[4]u32, a, shift{k,4}}}
-    def c32{k, a} = (if (w<=8*k) op{a, sel8{a, shift{k,16}}}; else a)
-    def pre{a} = {
-      b:= c8{2, c8{1, c32{2, c32{1, a}}}}
-      op{b, sel{[8]i32, spread{b}, maker{[8]i32, 3*(3 m) m = x; x = m
   }
 }
diff --git a/src/singeli/src/neq.singeli b/src/singeli/src/neq.singeli
deleted file mode 100644
index 11aef7d6..00000000
--- a/src/singeli/src/neq.singeli
+++ /dev/null
@@ -1,29 +0,0 @@
-include './base'
-include './sse'
-include './clmul'
-
-fn clmul_scan_ne_any(x:*void, r:*void, init:u64, words:u64, mark:u64) : void = {
-  def V = [2]u64
-  m := V**mark
-  def xor64{a, i, carry} = { # carry is 64-bit broadcasted current total
-    p := clmul{a, m, i}
-    t := shr{[16]u8, p, 8}
-    s := p ^ carry
-    carry = s ^ t
-    s
-  }
-  xv := *V ~~ x
-  rv := *V ~~ r
-  e := words/2
-  c := V**init
-  @for (rv, xv over e) {
-    rv = apply{zipLo, (@collect (j to 2) xor64{xv, j, c})}
-  }
-  if (words & 1) {
-    storeLow{rv+e, 64, clmul{loadLow{xv+e, 64}, m, 0} ^ c}
-  }
-}
-fn clmul_scan_ne_bit(init:u64, x:*u64, r:*u64, ia:u64) : void = {
-  clmul_scan_ne_any(*void~~x, *void~~r, init, ia, -(u64~~1))
-}
-export{'clmul_scan_ne', clmul_scan_ne_bit}
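The deleted kernel rests on one identity: an XOR-scan across the bits of a word is a carry-less multiplication by an all-ones mask, which is exactly the mark = -(u64~~1) argument fed to clmul above. A minimal scalar C sketch of that identity (illustrative names, not CBQN code):

#include <stdint.h>

// Prefix XOR over the 64 bits of x: bit i of the result is the XOR of
// bits 0..i. XOR-ing in shifted copies at doubling offsets multiplies by
// (1+z)(1+z^2)(1+z^4)... = 1+z+...+z^63 in GF(2)[z], i.e. it computes
// the low 64 bits of a carry-less multiply of x by an all-ones mask.
static uint64_t prefix_xor64(uint64_t x) {
  x ^= x << 1;  x ^= x << 2;  x ^= x << 4;
  x ^= x << 8;  x ^= x << 16; x ^= x << 32;
  return x;
}

With PCLMULQDQ the whole cascade is one instruction per 64-bit half, which is why the kernel reappears in scan.singeli below behind a hasarch{'PCLMUL'} guard instead of keeping its own file.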
diff --git a/src/singeli/src/scan.singeli b/src/singeli/src/scan.singeli
index 197680f8..773e8675 100644
--- a/src/singeli/src/scan.singeli
+++ b/src/singeli/src/scan.singeli
@@ -1,30 +1,19 @@
 include './base'
 include './sse'
+include './clmul'
 include './avx'
 include './avx2'
 include './mask'
 include './f64'
+include './scan_common'
 
-def sel8{v, t} = sel{[16]u8, v, make{[32]i8, t}}
-def sel8{v, t & istup{t} & tuplen{t}==16} = sel8{v, merge{t,t}}
-
-def shuf{T, v, n & istup{n}} = shuf{T, v, base{4,n}}
-
-# Fill last 4 bytes with last element, in each lane
-def spread{a:VT} = {
-  def w = elwidth{VT}
-  def b = w/8
-  if (w<=16) sel8{a,merge{iota{12},(16-b)+iota{4}%b}}; else a
-}
-
-# Set all elements with the last element of the input
-def toLast{n:VT} = {
-  if (elwidth{VT}<=32) sel{[8]i32, spread{n}, [8]i32**7}
-  else shuf{[4]u64, n, 4b3333}
+# Initialized scan, generic implementation
+fn scan_scal{T, op}(x:*T, r:*T, len:u64, m:T) : void = {
+  @for (x, r over len) r = m = op{m, x}
 }
 
 def scan_loop{T, init, x:*T, r:*T, len:u64, scan, scan_last} = {
-  def step = 256/width{T}
+  def step = arch_defvw/width{T}
   def V = [step]T
   p:= V**init
   xv:= *V ~~ x
@@ -45,64 +34,85 @@ def scan_post{T, init, x:*T, r:*T, len:u64, op, pre} = {
 }
 
 # Associative scan ?` if a?b?a = a?b = b?a, used for ⌊⌈
-fn avx2_scan_idem{T, op}(x:*T, r:*T, len:u64, init:T) : void = {
-  # Within each lane, scan using shifts by powers of 2. First k elements
-  # when shifting by k don't need to change, so leave them alone.
-  def w = width{T}
-  def shift{k,l} = merge{iota{k},iota{l-k}}
-  def c8 {k, a} = op{a, shuf{[4]u32, a, shift{k,4}}}
-  def c32{k, a} = (if (w<=8*k) op{a, sel8{a, shift{k,16}}}; else a)
-  # Prefix op on entire AVX register
-  def pre{a} = {
-    b:= c8{2, c8{1, c32{2, c32{1, a}}}}
-    # After lanewise scan, broadcast end of lane 0 to entire lane 1
-    op{b, sel{[8]i32, spread{b}, make{[8]i32, 3*(3sh{4b1110,sh{4b2200,a}}}
-}
-export{'avx2_scan_min_init_i8', avx2_scan_idem{i8 , min}}; export{'avx2_scan_max_init_i8', avx2_scan_idem{i8 , max}}
-export{'avx2_scan_min_init_i16', avx2_scan_idem{i16, min}}; export{'avx2_scan_max_init_i16', avx2_scan_idem{i16, max}}
-export{'avx2_scan_min_init_i32', avx2_scan_idem{i32, min}}; export{'avx2_scan_max_init_i32', avx2_scan_idem{i32, max}}
-export{'avx2_scan_min_init_f64', avx2_scan_idem{f64, min}}; export{'avx2_scan_max_init_f64', avx2_scan_idem{f64, max}}
+export{'si_scan_min_init_i8', scan_idem{i8 , min}}; export{'si_scan_max_init_i8', scan_idem{i8 , max}}
+export{'si_scan_min_init_i16', scan_idem{i16, min}}; export{'si_scan_max_init_i16', scan_idem{i16, max}}
+export{'si_scan_min_init_i32', scan_idem{i32, min}}; export{'si_scan_max_init_i32', scan_idem{i32, max}}
+export{'si_scan_min_init_f64', scan_idem{f64, min}}; export{'si_scan_max_init_f64', scan_idem{f64, max}}
 
-fn avx2_scan_idem_id{T, op}(x:*T, r:*T, len:u64) : void = {
-  def m = 1 << (width{T}-1)
-  def id = (if (same{op,min}) m-1; else -m)
-  avx2_scan_idem{T, op}(x, r, len, id)
+fn scan_idem_id{T, op}(x:*T, r:*T, len:u64) : void = {
+  scan_idem{T, op}(x, r, len, get_id{op, T})
 }
-export{'avx2_scan_min_i8', avx2_scan_idem_id{i8 , min}}; export{'avx2_scan_max_i8', avx2_scan_idem_id{i8 , max}}
-export{'avx2_scan_min_i16', avx2_scan_idem_id{i16, min}}; export{'avx2_scan_max_i16', avx2_scan_idem_id{i16, max}}
-export{'avx2_scan_min_i32', avx2_scan_idem_id{i32, min}}; export{'avx2_scan_max_i32', avx2_scan_idem_id{i32, max}}
+export{'si_scan_min_i8', scan_idem_id{i8 , min}}; export{'si_scan_max_i8', scan_idem_id{i8 , max}}
+export{'si_scan_min_i16', scan_idem_id{i16, min}}; export{'si_scan_max_i16', scan_idem_id{i16, max}}
+export{'si_scan_min_i32', scan_idem_id{i32, min}}; export{'si_scan_max_i32', scan_idem_id{i32, max}}
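scan_idem scans within a register by combining at power-of-two shifts, and scan_loop then chains registers by broadcasting each block's last element (toLast) as the carry into the next block. A rough intrinsics sketch of both halves, fixed to unsigned-byte max so the zeros shifted in are already the identity (illustrative names; the generated code instead shifts in get_id{op,T} for signed types):

#include <tmmintrin.h>  // SSE2 + SSSE3
#include <stdint.h>
#include <stddef.h>

// Prefix max of 16 unsigned bytes: combine at byte shifts 1, 2, 4, 8.
static __m128i prefix_max_u8(__m128i v) {
  v = _mm_max_epu8(v, _mm_slli_si128(v, 1));
  v = _mm_max_epu8(v, _mm_slli_si128(v, 2));
  v = _mm_max_epu8(v, _mm_slli_si128(v, 4));
  v = _mm_max_epu8(v, _mm_slli_si128(v, 8));
  return v;
}

// Block-carry loop in the style of scan_loop: scan a block, fold in the
// carry, then broadcast the block's last byte as the next carry.
// n16 is the number of whole 16-byte blocks.
static void max_scan_u8(const uint8_t* x, uint8_t* r, size_t n16) {
  __m128i carry = _mm_setzero_si128();
  for (size_t i = 0; i < n16; i++) {
    __m128i v = _mm_loadu_si128((const __m128i*)x + i);
    __m128i s = _mm_max_epu8(prefix_max_u8(v), carry);
    _mm_storeu_si128((__m128i*)r + i, s);
    carry = _mm_shuffle_epi8(s, _mm_set1_epi8(15));  // toLast
  }
}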
 
 # Assumes identity is 0
-def scan_assoc{op, a:T} = {
-  # Within each lane, scan using shifts by powers of 2
-  def w = elwidth{T}
-  def c32{k, a} = (if (w<=8*k) op{a, shl{[16]u8, a, k}}; else a)
-  b:= c32{8, c32{4, c32{2, c32{1, a}}}}
-  # After lanewise scan, broadcast end of lane 0 to entire lane 1
-  l:= (type{b}~~make{[8]i32,0,0,0,-1,0,0,0,0}) & spread{b}
-  op{b, sel{[8]i32, l, make{[8]i32,0,0,0,0, 3,3,3,3}}}
+def scan_assoc{op} = {
+  def shl0{v, k} = shl{[16]u8, v, k/8} # Lanewise
+  def shl0{v:V, k==128 & hasarch{'AVX2'}} = {
+    # Broadcast end of lane 0 to entire lane 1
+    l:= V~~make{[8]i32,0,0,0,-1,0,0,0,0} & spread{v}
+    sel{[8]i32, l, make{[8]i32, 3*(3>63) # repeat sign bit
+  }
+}
+fn clmul_scan_ne_any{..._ & hasarch{'PCLMUL'}}(x:*void, r:*void, init:u64, words:u64, mark:u64) : void = {
+  def V = [2]u64
+  m := V**mark
+  def xor64{a, i, carry} = { # carry is 64-bit broadcasted current total
+    p := clmul{a, m, i}
+    t := shr{[16]u8, p, 8}
+    s := p ^ carry
+    carry = s ^ t
+    s
+  }
+  xv := *V ~~ x
+  rv := *V ~~ r
+  e := words/2
+  c := V**init
+  @for (rv, xv over e) {
+    rv = apply{zipLo, (@collect (j to 2) xor64{xv, j, c})}
+  }
+  if (words & 1) {
+    storeLow{rv+e, 64, clmul{loadLow{xv+e, 64}, m, 0} ^ c}
+  }
+}
+fn scan_neq{..._ & hasarch{'PCLMUL'}}(init:u64, x:*u64, r:*u64, nw:u64) : void = {
+  clmul_scan_ne_any{}(*void~~x, *void~~r, init, nw, -(u64~~1))
+}
+export{'si_scan_ne', scan_neq{}}
 
 # Boolean cumulative sum
-fn avx2_bcs{T}(x:*u64, r:*T, l:u64) : void = {
+fn bcs{T}(x:*u64, r:*T, l:u64) : void = {
+  def bitp_get{arr, n} = (load{arr, n>>6} >> (n&63)) & 1
+  c:T = 0
+  @for (r over i to l) { c+= cast_i{T, bitp_get{x,i}}; r = c }
+}
+fn bcs{T & hasarch{'AVX2'}}(x:*u64, r:*T, l:u64) : void = {
   def U = ty_u{T}
   def w = width{T}
   def vl= 256 / w
@@ -157,9 +167,9 @@ fn avx2_bcs{T}(x:*u64, r:*T, l:u64) : void = {
     step{load{xv, e}, e, st}
   }
 }
-export{'avx2_bcs8', avx2_bcs{i8}}
-export{'avx2_bcs16', avx2_bcs{i16}}
-export{'avx2_bcs32', avx2_bcs{i32}}
+export{'si_bcs8', bcs{i8}}
+export{'si_bcs16', bcs{i16}}
+export{'si_bcs32', bcs{i32}}
@@ -190,7 +200,23 @@ def maxabsval{T & issigned{T}} = -minvalue{T}
 def maxsafeint{T & issigned{T}} = maxvalue{T}
 def maxsafeint{T==f64} = 1<<53
 
-def simd_plus_scan{X, b, R}{x:*X, c:(R), r:*R, len:u64} = {
+fn plus_scan{X, R, O}(x:*X, c:R, r:*R, len:u64) : O = {
+  i:u64 = 0
+  if (hasarch{'AVX2'}) simd_plus_scan_part{X,R}{x, c, r, len, i}
+  @forUnroll{1,1} (js from i to len) {
+    def vs = eachx{load, x, js}
+    each{{j, v} => {
+      def {b,n} = addChk{c, promote{R, v}}
+      if (rare{b}) return{j}
+      store{r, j, n}
+      c = n
+    }, js, vs}
+  }
+  len
+}
+# Sum as many vector registers as possible; modifies c and i
+def simd_plus_scan_part{X, R}{x:*X, c:(R), r:*R, len:u64, i:u64} = {
+  def b = max{width{R}/2, width{X}}
   def bulk = arch_defvw/b
   def wd = (X!=R) & (width{X}<32) # whether to widen the working copy one size
@@ -203,7 +229,6 @@ def simd_plus_scan{X, b, R}{x:*X, c:(R), r:*R, len:u64} = {
   if (R!=f64) {
     def m = maxFastA + maxFastE*bulk; assert{m<=maxvalue{R}}; assert{-m>=minvalue{R}}
   }
-  i:u64 = 0
   cv:= [arch_defvw/width{R}]R ** c
   if (R==f64 and c != floor{c}) goto{'end'}
@@ -237,24 +262,13 @@ def simd_plus_scan{X, b, R}{x:*X, c:(R), r:*R, len:u64} = {
   setlabel{'end'}
   c = extract{cv, 0}
-
-  @forUnroll{1,1} (js from i to len) {
-    def vs = eachx{load, x, js}
-    each{{j, v} => {
-      def {b,n} = addChk{c, promote{R, v}}
-      if (rare{b}) return{j}
-      store{r, j, n}
-      c = n
-    }, js, vs}
-  }
-  len
 }
 
-fn simd_plus_scanG{X, b, R}(x:*X, c:R, r:*R, len:u64) : void = simd_plus_scan{X,b,R}{x, c, r, len}
-fn simd_plus_scanC{X, b, R}(x:*X, c:R, r:*R, len:u64) : u64 = simd_plus_scan{X,b,R}{x, c, r, len}
+def plus_scanG{X, R} = plus_scan{X, R, void}
+def plus_scanC{X, R} = plus_scan{X, R, u64}
 
-export{'simd_scan_plus_i8_i32', simd_plus_scanC{i8, 16, i32}}
-export{'simd_scan_plus_i16_i32', simd_plus_scanC{i16, 16, i32}}
-export{'simd_scan_plus_i32_i32', simd_plus_scanC{i32, 32, i32}}
+export{'si_scan_plus_i8_i32', plus_scanC{i8, i32}}
+export{'si_scan_plus_i16_i32', plus_scanC{i16, i32}}
+export{'si_scan_plus_i32_i32', plus_scanC{i32, i32}}
 
-export{'simd_scan_plus_i16_f64', simd_plus_scanG{i16, 32, f64}}
-export{'simd_scan_plus_i32_f64', simd_plus_scanG{i32, 32, f64}}
+export{'si_scan_plus_i16_f64', plus_scanG{i16, f64}}
+export{'si_scan_plus_i32_f64', plus_scanG{i32, f64}}
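plus_scanC's contract is the part worth spelling out: fill r with the running sum until the total would leave the result type, then report how many results were written so the caller can restart at that index in a wider type (plus_scanG returns void because its f64 results never need the retry). A scalar sketch of the checked tail loop, with addChk stood in for by the GCC/Clang checked-add builtin (names illustrative; the real kernel first burns through whole vector registers in simd_plus_scan_part):

#include <stdint.h>
#include <stddef.h>

// Overflow-checked +` : returns the count of results written.
static size_t plus_scan_i8_i32(const int8_t* x, int32_t c, int32_t* r, size_t n) {
  for (size_t i = 0; i < n; i++) {
    int32_t s;
    if (__builtin_add_overflow(c, (int32_t)x[i], &s)) return i;  // caller retries wider
    r[i] = c = s;
  }
  return n;
}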
diff --git a/src/singeli/src/scan_common.singeli b/src/singeli/src/scan_common.singeli
new file mode 100644
index 00000000..a82f2969
--- /dev/null
+++ b/src/singeli/src/scan_common.singeli
@@ -0,0 +1,69 @@
+# Used by scan.singeli and bins.singeli
+
+def sel8{v:V, t} = sel{[16]u8, v, make{re_el{i8,V}, t}}
+def sel8{v:V, t & w256{V} & istup{t} & tuplen{t}==16} = sel8{v, merge{t,t}}
+
+def shuf{T, v, n & istup{n}} = shuf{T, v, base{4,n}}
+
+local def rev{t} = { def l=tuplen{t}; def j=l-1; tupsel{j-range{l}, j-t} }
+local def rev{up,t} = if (up) t else rev{t}
+def sel8{v, t, up} = sel8{v, rev{up,t}}
+
+def zip{up, x} = (if (up) zipHi else zipLo){x,x}
+
+# Fill last 4 bytes with last element, in each lane
+def spread{a:VT, ...up} = {
+  def w = elwidth{VT}
+  def b = w/8
+  if (w<=16) sel8{a,merge{iota{12},(16-b)+iota{4}%b}, ...up}; else a
+}
+
+# Set all elements with the last element of the input
+def toLast{n:VT, up & hasarch{'X86_64'} & w128{VT}} = {
+  def l{v, w} = l{zip{up,v}, 2*w}
+  def l{v, w & hasarch{'SSSE3'}} = sel8{v, up*(16-w/8)+iota{16}%(w/8)}
+  def l{v, w & w>=32} = shuf{[4]i32, v, 4**(up*3)}
+  l{n, elwidth{VT}}
+}
+def toLast{n:VT, up & hasarch{'AVX2'} & w256{VT}} = {
+  if (elwidth{VT}<=32) sel{[8]i32, spread{n,up}, [8]i32**(up*7)}
+  else shuf{[4]u64, n, 4**(up*3)}
+}
+def toLast{n:VT} = toLast{n, 1}
+
+# Make prefix scan from op and shifter by applying the operation
+# at increasing power-of-two shifts
+def prefix_byshift{op, sh} = {
+  def pre{v:V, k} = if (k < width{V}) pre{op{v, sh{v,k}}, 2*k} else v
+  {v:T} => pre{v, if (isvec{T}) elwidth{T} else 1}
+}
+
+def get_id{op,T} = (match (op) { {_==min}=>maxvalue; {_==max}=>minvalue }){T}
+
+def make_scan_idem{T, op, up} = {
+  # Within each lane, scan using shifts by powers of 2. First k elements
+  # when shifting by k don't need to change, so leave them alone.
+  def shift{k,l} = rev{up, merge{iota{k},iota{l-k}}}
+  def shb{v:V, k} = {
+    def w=width{T}; def c = k/w
+    def merger{a,b} = if (up) merge{a,b} else merge{b,a}
+    def id = make{V, merger{c**get_id{op,T}, (width{V}/w-c)**0}}
+    (if (up) shl else shr){[16]u8, v, k/8} | id
+  }
+  def shb{v, k & hasarch{'SSSE3'}} = sel8{v, shift{k/8,16}}
+  def shb{v, k & k>=32} = shuf{[4]u32, v, shift{k/32,4}}
+  def shb{v, k & k==128 & hasarch{'AVX2'}} = {
+    # After lanewise scan, broadcast end of lane 0 to entire lane 1
+    sel{[8]i32, spread{v,up}, make{[8]i32, rev{up,3*(3
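The pattern make_scan_idem and prefix_byshift package up is plain Hillis–Steele: combine at doubling shift distances, and when shifting by k leave the first k elements untouched since they are already final. In scalar C, for + over one 8-element block (a sketch for intuition only; the Singeli version runs on vector registers and reverses the shift direction when up is false):

#include <stdint.h>

// In-place Hillis–Steele prefix sum of an 8-element block. Walking i
// downward means each round reads only pre-round values, and elements
// below the current shift k are already correct, so they stay put.
static void prefix_sum_block8(int32_t* v) {
  for (int k = 1; k < 8; k *= 2)
    for (int i = 7; i >= k; i--)
      v[i] += v[i - k];
}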