diff --git a/src/builtins/grade.h b/src/builtins/grade.h
index be95c6fc..06eb3ee2 100644
--- a/src/builtins/grade.h
+++ b/src/builtins/grade.h
@@ -97,9 +97,27 @@ extern void (*const si_scan_min_i16)(int16_t* v0,int16_t* v1,uint64_t v2);
     if (e==n) {break;} k=e; \
   }
 #define WRITE_SPARSE(T) WRITE_SPARSE_##T
-extern i8 (*const avx2_count_i8)(usz*, i8*, u64, i8);
-#define SINGELI_COUNT_OR(T) \
-  if (1==sizeof(T)) avx2_count_i8(c0o, (i8*)xp, n, -128); else
+extern i8 (*const simd_count_i8)(u16*, u16*, void*, u64, i8);
+#define COUNTING_SORT_i8 \
+  usz C=1<<8; \
+  TALLOC(u16, c0, C+(n>>15)+1); \
+  u16 *c0o=c0+C/2; u16 *ov=c0+C; \
+  for (usz j=0; j 0)) { // Overflowed i32!
+    r = taga(cpyF64Arr(r)); f64* rp = tyany_ptr(r);
+    for (usz i=0; imo) mo=c;
+  }
+  // Since mo is a multiple of 128 and all of r is less than 128,
+  // values in r can't affect the result type
+  #define RESIZE(T, UT) \
+    r = taga(cpy##UT##Arr(r)); T* rp = tyany_ptr(r); \
+    for (usz i=0; i0] = sum; rp[0] = xia - sum; r = num_squeeze(r); break; }
-#define IIND_INT(N) \
+#if SINGELI_SIMD
+  #define INIT_RES(N,RIA) \
+    i##N* rp; r = m_i##N##arrv(&rp, RIA); \
+    for (usz i=0; ixp[a-1]) a++; \
   u##N max=xp[a-1]; \
+  usz rmax=xia; \
   if (amax) max=c; } \
   if ((i##N)max<0) thrM("/⁼: Argument cannot contain negative numbers"); \
   usz ria = max + 1; \
   if (xia < ria/8) { \
     u8 maxcount = 0; \
     TALLOC(u8, tab, ria); \
-    for (usz i=0; i=16 && maxcount<127) { \
-      i8* rp; r = m_i8arrv(&rp, ria); for (usz i=0; i=16 && maxcount<128) rmax=127; \
   } \
   } \
+  usz ria = (usz)max + 1; \
   if (a==xia) { /* Unique argument */ \
-    usz ria = max + 1; \
     u64* rp; r = m_bitarrv(&rp, ria); \
     for (usz i=0; im/2) thrM("/⁼: Argument cannot contain negative numbers"); \
-  i32* rp; r = m_i32arrv(&rp, ria); vfor (usz i=0; i>15; \
+  TALLOC(u16, ov, os+1); \
+  i##N max = simd_count_i##N((u16*)rp, (u16*)ov, xp, xia, 0); \
+  if (max < 0) thrM("/⁼: Argument cannot contain negative numbers"); \
+  usz ria = (usz)max + 1; \
+  if (ria < sa) r = C2(take, m_f64(ria), r); \
+  r = finish_small_count(r, ov); \
+  TFREE(ov); \
+  break; \
 }
-#if SINGELI_SIMD
-  #define SINGELI_COUNT_OR(N) if (N==8) { \
-    TALLOC(usz, t, m/2); \
-    for (usz j=0; jI32_MAX) thrM("/⁼: Argument too large");
+    INIT_RES(32,ria)
+    simd_count_i32_i32(rp, xp, xia);
+    r = num_squeeze(r); break;
+  }
+  #undef TRY_SMALL_OUT
+  #undef INIT_RES
+#else
+  #define CASE(N) case el_i##N: { \
+    i##N* xp = i##N##any_ptr(x); \
+    u##N max=xp[0]; \
+    for (usz i=1; imax) max=c; } \
+    if ((i##N)max<0) thrM("/⁼: Argument cannot contain negative numbers"); \
+    usz ria = max + 1; \
+    TALLOC(usz, t, ria); \
+    for (usz j=0; jI32_MAX) thrM("/⁼: Argument too large"); i32* rp; r = m_i32arrv(&rp, ria); for (usz i=0; imax?c:max; if (c<0) thrM("/⁼: Argument cannot contain negative numbers"); }
-  if (max > USZ_MAX-1) thrOOM();
-  usz ria = max+1;
-  if (i==xia) {
-    u64* rp; r = m_bitarrv(&rp, ria); for (usz i=0; i=4) {
+    def eq_k{k} = homAll{tree_fold{&, @unroll(x in xv+a over k) x==jv}}
+    def skip_eq{k} = if (eq_k{k}) { a=2*k; skip_eq{2*k} }
+    def skip_eq{k==4} = while (a<=b-k and eq_k{k}) a+=k
+    skip_eq{1}
+  }
+  # Now start analysis
+  @for (xv, xp in *V~~(x-1) over _ from a to b) {
+    jv = min{jv, xv}; mv = max{mv, xv}
+    dc -= xp != xv
+  }
   @for (x over _ from rv to r) { if (xmx) mx=x }
   jt := vfold{min, jv}
   mt := vfold{max, mv}
   if (jt < min_allowed) return{jt}
   if (mt > mx) mx = mt
-  nc := uT~~(mt - jt) # Number of counts to perform: last is implicit
-  if (nc <= 24*vbits/128) {
+  # Fast cases
+  dt := promote{u64, fold_addw{dc}}
+  nc := TU~~(mt - jt) # Number of counts to perform: last is implicit
+  if (dt < b * (vec/2) and (b + dt)*4 < b * promote{u64,nc}) {
+    r0 = count_with_runs{x, tab, r}
+  } else if (nc <= 24*vbits/128) {
     r0 = rv
-    j0 := promote{u64, uT~~jt} # Starting count
-    m := promote{u64, nc} # Number of iterations
-    total := trunc{usz, r0} # To compute last count
-    def count_each{js, num} = {
-      j := @collect (k to num) trunc{T, js+k}
-      c := copy{length{j}, [vec]uT ** 0}
-      e := each{{j}=>V**j, j}
-      @for (xv over b) each{{c,e} => c -= xv == e, c, e}
-      def add_sum{c, j} = {
-        s := promote{usz, fold_addw{c}}
-        total -= s; inc{tab, j, s}
-      }
-      each{add_sum, c, j}
+    count_by_sum{T, V, [vec]TU, xv, b, tab, r0,
+      promote{u64, TU~~jt}, # Starting count
+      promote{u64, nc} # Number of iterations
     }
-    m4 := m / 4
-    @for (j4 to m4) count_each{j0 + 4*j4, 4}
-    @for (j from 4*m4 to m) count_each{j0 + j, 1}
-    inc{tab, trunc{T, j0 + m}, trunc{usz,total}}
   }
   # Scalar fallback and cleanup
   @for (x over _ from r0 to r) inc{tab, x}
   i += r
   x += r
+
+  # Keep counts below 1<<15 with the overflow list
+  # Count from the end to include i==n and handle a long last block nicely
+  if ((i-n)%(1<<15) < block*vec and i >= 1<<15) {
+    ov += flush_counts(tab+min_allowed, ov, cast_i{usz,ty_u{mx+min_allowed}} + 1)
+  }
 }
+  store{ov, 0, maxvalue{u16}} # End marker: note x values fit in i16
   mx
 }
-export{'avx2_count_i8', count{i8}}
+fn flush_counts(tab:*u16, ov:*u16, n:usz) : usz = {
+  def vl = arch_defvw/16
+  def V = [vl]u16
+  def bot = 1<<15 - 1
+  on:usz = 0
+  @for (t in *V~~tab over jv to cdiv{n, vl}) if (rare{topAny{t}}) {
+    o := if (hasarch{'X86_64'}) topMask{t} else homMask{t > V**bot}
+    if (jv == n/vl) o &= type{o}~~1<<(n%vl) - 1
+    while (o > 0) {
+      jv := jv*vl + cast_i{usz, ctz{o}}
+      store{tab, jv, load{tab, jv} & bot}
+      store{ov, on, trunc{u16, jv}}; ++on
+      o &= o-1
+    }
+  }
+  on
+}
+
+# Sum comparisons against each value (except one) in the range
+def count_by_sum{T, V, U, xv, b, tab, r0, j0, m} = {
+  total := trunc{usz, r0} # To compute last count
+  def count_each{js, num} = {
+    j := @collect (k to num) trunc{T, js+k}
+    c := length{j} ** U**0
+    e := each{{j}=>V**j, j}
+    @for (xv over b) each{{c,e} => c -= xv == e, c, e}
+    def add_sum{c, j} = {
+      s := promote{usz, fold_addw{c}}
+      total -= s; inc{tab, j, s}
+    }
+    each{add_sum, c, j}
+  }
+  m4 := m / 4
+  @for (j4 to m4) count_each{j0 + 4*j4, 4}
+  @for (j from 4*m4 to m) count_each{j0 + j, 1}
+  inc{tab, trunc{T, j0 + m}, total}
+}
+
+# Count adjacent equal elements at once, breaking at w-element groups
+# May read up to index n from x, hitting one element that's not counted
+def count_with_runs{x, tab, n} = {
+  def w = width{ux}
+  m0:ux = 1 << (w-1) # Last element in each chunk ends a run
+  bw := n / w
+  @for (i to bw) {
+    xo := x + i*w
+    m := m0; mark_run_ends{xo, m}
+    inc_marked_runs{xo, tab, m, m0}
+  }
+  bw * w # Number of elements handled
+}
+# Switch to the normal scalar count if there aren't enough runs
+def count_adapt_runs{x0, tab, n} = {
+  def w = width{ux}
+  m0:ux = 1 << (w-1)
+  x := x0; r := n
+  while (r > 0) {
+    def skip_runs = makelabel{}
+    b:usz = w
+    if (rare{b > r}) { b = r; goto{skip_runs} }
+    m := m0; mark_run_ends{x, m}
+    if (popc{m} < w/2) {
+      inc_marked_runs{x, tab, m, m0}
+    } else {
+      setlabel{skip_runs}
+      @for (x over b) inc{tab, x}
+    }
+    x += b; r -= b
+  }
+}
+def mark_run_ends{x:*T, m:(ux)} = {
+  def vec = arch_defvw/width{T}
+  def V = [vec]T
+  @unroll (j to width{ux} / vec) {
+    def jv = j*vec
+    def lv{k} = load{*V~~(x + k)}
+    m |= promote{ux, homMask{lv{jv} != lv{jv+1}}} << jv
+  }
+}
+def inc_marked_runs{x, tab:*T, m, m0} = {
+  def w = width{ux}
+  # Iterate over runs marked in m
+  jp:T = - T~~1
+  while (m > m0) @unroll (2) {
+    j := trunc{T, ctz{m}}
+    inc{tab, load{x, j}, j - jp}
+    jp = j; m &= m-1
+  }
+  # One step if popc{m} was odd, reducing branch mispredictions above
+  inc{tab, load{x, w-1}, ((w-1) - jp) & -trunc{T, m>>(w-1)}}
+}
+
+# No count_by_sum: build each run mask then decide whether to use it
+fn count_i32_i32(tab:*i32, x:*i32, n:usz) : void = count_adapt_runs{x, tab, n}
+
+# For i←/⁼x, store r←128|i, and i-r sparsely: x is ∧(/r)∾oc/ov
+# ov is sorted but may not be unique, and oc contains multiples of 128
+# Return the shared length of ov and oc
+fn count_sorted{T}(r:*u8, ov:*usz, oc:*usz, x:*T, n:usz) : usz = {
+  def V = [arch_defvw/width{T}]T
+  def block = 128
+  i:usz = 0
+  on:usz = 0
+  def overflow{xu,c} = { store{ov, on, xu}; store{oc, on, c}; ++on }
+  while (i < n) {
+    rem := n - i
+    xo := x + i
+    xi := load{xo}
+    def overflow{c} = overflow{cast_i{usz,xi}, c}
+    xe := xo-1; def bxi{j} = xi == load{xe, j}
+    if (block <= rem and bxi{block}) {
+      # Gallop to find last block ending in xi
+      d:usz = block
+      d2 := undefined{usz}
+      while ((d2=d+d) <= rem and bxi{d2}) d = d2
+      l := min{(rem &~ (block-1)) - d, d}
+      # Target is in [d,d+l); shrink l
+      while (l > block) {
+        h := (l/2) &~ (block-1)
+        m := d + h
+        if (bxi{m}) d = m
+        l -= h
+      }
+      overflow{d}
+      rem -= d; if (rem == 0) return{on}
+      i += d; xo += d; xi = load{xo}
+    }
+    # Count the next block normally
+    rem = min{rem, usz~~block} # TODO get rid of the need of the usz~~ here
+    count_adapt_runs{xo, r, rem}
+    rxi := load{r, xi}
+    if (rxi >= block) {
+      store{r, xi, rxi - block}
+      overflow{block}
+    }
+    i += rem
+  }
+  on
+}
+
+export{'simd_count_i8', count{i8}}
+export{'simd_count_i16', count{i16}}
+export{'simd_count_i32_i32', count_i32_i32}
+export{'si_count_sorted_i8', count_sorted{i8}}
+export{'si_count_sorted_i16', count_sorted{i16}}
+export{'si_count_sorted_i32', count_sorted{i32}}
diff --git a/src/singeli/src/neon.singeli b/src/singeli/src/neon.singeli
index 94e9195f..9b5c21b7 100644
--- a/src/singeli/src/neon.singeli
+++ b/src/singeli/src/neon.singeli
@@ -15,6 +15,8 @@ def ntyp{S, ...S2, T if w128{T}} = merge{S, 'q', ...S2, '_', nty{T}}
 def ntyp{S, ...S2, T if w64{T}} = merge{S, ...S2, '_', nty{T}}
 def ntyp0{S, T} = merge{S, '_', nty{T}}
 
+def __neg{a:T if nvecu{T}} = T~~(-ty_s{T}~~a)
+
 def __lt{a:T, 0 if nvecs{T} or nvecf{T}} = emit{ty_u{T}, ntyp{'vcltz', T}, a}
 def __le{a:T, 0 if nvecs{T} or nvecf{T}} = emit{ty_u{T}, ntyp{'vclez', T}, a}
 def __gt{a:T, 0 if nvecs{T} or nvecf{T}} = emit{ty_u{T}, ntyp{'vcgtz', T}, a}
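
Note (illustration, not part of the patch): the counting code above keeps per-value counters in u16 and, per its own comments, "keeps counts below 1<<15 with the overflow list", recording which counters were reduced so the full counts can be rebuilt afterwards (finish_small_count on the C side). The following is a minimal standalone C sketch of that general idea only; the names count_u16_overflow and finish_counts are hypothetical and the scalar structure does not match the SIMD/Singeli implementation in the diff.

// counting_sketch.c -- illustrative sketch of 16-bit counters + overflow list
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

enum { BOT = 1u << 15 };  /* counters are kept below 1<<15 */

/* Count bytes of x into 256 u16 counters; whenever a counter reaches 1<<15,
   clear it and append the bucket index to the overflow list ov instead.
   Returns the number of overflow entries written. */
static size_t count_u16_overflow(uint16_t tab[256], uint16_t *ov,
                                 const int8_t *x, size_t n) {
  size_t on = 0;
  for (size_t i = 0; i < 256; i++) tab[i] = 0;
  for (size_t i = 0; i < n; i++) {
    uint16_t b = (uint16_t)(x[i] + 128);  /* shift -128..127 to index 0..255 */
    if (++tab[b] == BOT) { tab[b] = 0; ov[on++] = b; }
  }
  return on;
}

/* Rebuild full 64-bit counts: each overflow entry stands for 1<<15 occurrences. */
static void finish_counts(uint64_t out[256], const uint16_t tab[256],
                          const uint16_t *ov, size_t on) {
  for (size_t i = 0; i < 256; i++) out[i] = tab[i];
  for (size_t i = 0; i < on; i++) out[ov[i]] += BOT;
}

int main(void) {
  enum { N = 100000 };
  static int8_t x[N];
  for (size_t i = 0; i < N; i++) x[i] = (int8_t)(i % 3 ? 7 : -5);
  uint16_t tab[256], ov[N / BOT + 1];
  uint64_t out[256];
  size_t on = count_u16_overflow(tab, ov, x, N);
  finish_counts(out, tab, ov, on);
  printf("count of 7: %llu\n", (unsigned long long)out[7 + 128]);
  return 0;
}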