diff --git a/build/src/build.bqn b/build/src/build.bqn
index 9db098e9..bb4d3d4e 100755
--- a/build/src/build.bqn
+++ b/build/src/build.bqn
@@ -638,7 +638,7 @@ cachedBin‿linkerCache ← {
     "2.."‿"src/builtins/select.c"‿"select", "2.."‿"src/builtins/scan.c"‿"scan",
     "2a."‿"src/builtins/slash.c"‿"constrep", "2.."‿"src/builtins/scan.c"‿"neq",
-    "2.."‿"src/builtins/slash.c"‿"slash", "2.."‿"src/builtins/slash.c"‿"count"
+    "xag"‿"src/builtins/slash.c"‿"slash", "2.."‿"src/builtins/slash.c"‿"count"
   ⟩
   objs ← ⟨⟩
diff --git a/src/builtins/slash.c b/src/builtins/slash.c
index 731bd562..b39112e0 100644
--- a/src/builtins/slash.c
+++ b/src/builtins/slash.c
@@ -4,7 +4,7 @@
 // Boolean 𝕨 (Where/Compress) general case based on result type width
 //   COULD use AVX-512
 //   Size 1: pext, or bit-at-a-time
-//     SHOULD emulate pext if unavailable
+//     Emulate pext if unavailable
 //     COULD return boolean result from Where
 //   Size 8, 16: pdep/pext, or branchless
 //     SHOULD try vector lookup-shuffle if unavailable or old AMD
@@ -70,15 +70,18 @@
   #define _pdep_u64 vg_pdep_u64
 #else
   #define vg_loadLUT64(p, i) p[i]
-  #define rand_popc64(X) POPC(X)
 #endif
 static void storeu_u64(u64* p, u64 v) { memcpy(p, &v, 8); }
 static u64 loadu_u64(u64* p) { u64 v; memcpy(&v, p, 8); return v; }
-#if SINGELI_AVX2
-  #define SINGELI_FILE slash
-  #include "../utils/includeSingeli.h"
-#endif
+#endif
+#if !USE_VALGRIND
+  #define rand_popc64(X) POPC(X)
+#endif
+
+#if SINGELI
+  #define SINGELI_FILE slash
+  #include "../utils/includeSingeli.h"
 #endif
 
 #if SINGELI_AVX2 || SINGELI_NEON
@@ -437,13 +440,17 @@ static B compress(B w, B x, usz wia, u8 xl, u8 xt) {
     default: r = compress_grouped(wp, x, wia, wsum, xt); break;
     case 0: { u64* xp = bitarr_ptr(x); u64* rp;
-      #if defined(__BMI2__)
+      #if defined(__BMI2__) || SINGELI
       r = m_bitarrv(&rp,wsum+128); a(r)->ia = wsum;
       u64 cw = 0; // current word
       u64 ro = 0; // offset in word where next bit should be written; never 64
       for (usz i=0; i<…
…
diff --git a/src/singeli/src/slash.singeli b/src/singeli/src/slash.singeli
…
+  def build{k & k > 1} = {
+    def h = k>>1  # Increase size from h to k
+    {x,z} := build{h}
+    def low = lowbits{k}  # Low bit in each new group
+    if (k <= 3) {
+      z0 := z & low
+      zm := z>>1 & low
+      if (k == 2) tup{
+        x - (x>>1 & z0),
+        z0 + zm
+      } else tup{  # Faster 1->3 jump, currently unused
+        x - ((x>>1&mod{low*3}) & (z|z0<<1)) - (x>>2 & (z & zm)),
+        (z0 + zm) + (z>>2 & low)
+      }
+    } else {
+      # Shift high x group down by low z, then add halves of z
+      even:T = mod{low*(1<<…
+      def shift{sh, o, x} = {
+        …
+        s := (x & m)>>sh | (x &~ m)
+        if (2*sh<=k/2) shift{2*sh, o>>1, s} else s
+      }
+      tup{
+        (x&even) | shift{1, z, x&~even},
+        if (k>4) (z + z>>h)&even else ((z&~even)>>h) + (z&even)
+      }
+    }
+  }
+  # Finally, compose groups with regular shifts
+  def g = 8  # 12 performs about the same
+  {b,z} := build{g}
+  o := z*lowbits{g}  # Offsets by prefix sum
+  def s = 1<<…
+  def gr{sh} = …>> (o>>(sh-g) & s)
+  fold{|, b&s, each{gr, g*slice{iota{cdiv{w,g}},1}}}
+}
+
+export{'si_pext_u64', pext{u64}}
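
For reference, _pext_u64 packs the bits of x selected by the mask into the low end of the result, preserving their order; both the BMI2 path and the new Singeli emulation must match this contract. A minimal scalar sketch of the semantics (illustrative C, not CBQN code; the name pext_ref is mine):

    #include <stdint.h>

    // Reference pext: for each set mask bit, lowest first, copy the
    // corresponding bit of x into the next free result position.
    static uint64_t pext_ref(uint64_t x, uint64_t m) {
      uint64_t r = 0;
      for (int j = 0; m != 0; m &= m-1, j++) {   // m &= m-1 clears the lowest set bit
        if (x & (m & -m)) r |= (uint64_t)1 << j; // m & -m isolates that bit
      }
      return r;
    }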
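
The slash.singeli emulation avoids this bit-at-a-time loop by doubling group widths: within every k-bit group it shifts the high half's already-compacted bits down past the mask zeros of the low half, keeping a per-group zero count in z, and once groups reach g=8 bits it places each byte group with one shift taken from a prefix sum of those counts (o := z*lowbits{g}). The C sketch below is my reading of that strategy rather than a translation of the Singeli code, and all names (pext_emu, zl, hi) are mine:

    #include <stdint.h>

    // Group-doubling pext emulation (sketch). Invariant entering each step:
    // within every h-bit group, the mask-selected bits of x sit compacted at
    // the bottom of the group, and z holds that group's count of mask zeros.
    static uint64_t pext_emu(uint64_t x, uint64_t m) {
      uint64_t b = x & m; // 1-bit groups: each selected bit is already placed
      uint64_t z = ~m;    // zeros per 1-bit group: 0 or 1
      for (int h = 1; h < 8; h *= 2) { // build groups of k = 2, 4, 8 bits
        int k = 2*h;
        uint64_t low  = ~0ull / ((1ull << k) - 1); // bit 0 of every k-bit group
        uint64_t even = low * ((1ull << h) - 1);   // low h bits of every group
        uint64_t zl   = z & even;  // zeros in each group's low half
        uint64_t hi   = b & ~even; // high-half bits; must move down by zl
        for (int j = 0; (1 << j) <= h; j++) { // one masked shift per bit of zl
          uint64_t sel = ((zl >> j) & low) * ((1ull << k) - 1); // groups with bit j set
          hi = ((hi & sel) >> (1 << j)) | (hi & ~sel);
        }
        b = (b & even) | hi;                // halves land on disjoint bits
        z = (z & even) + ((z >> h) & even); // zero counts of the halves add
      }
      // Compose the eight byte groups: each byte moves down by the number of
      // mask zeros below it, a prefix sum of the per-byte zero counts.
      uint64_t o = z * 0x0101010101010101ull; // byte i of o: zeros in bytes 0..i
      uint64_t r = b & 0xff;                  // byte 0 needs no shift
      for (int i = 1; i < 8; i++) {
        unsigned off = (o >> (8*i - 8)) & 0xff; // zeros strictly below byte i
        r |= (b & (0xffull << (8*i))) >> off;
      }
      return r;
    }

As a quick check, pext_emu(0x0A05, 0x0F0F) == 0xA5: the two selected nibbles land side by side at the bottom of the result, matching _pext_u64.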
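
The compress() hunk then stitches one pext result per input word into the packed output: every word contributes popcount(w) bits at a running offset, and the accumulator is flushed whenever it fills. A self-contained sketch of that stitching under my reading of the truncated loop, reusing pext_emu from above (compress_bits is my name; __builtin_popcountll assumes GCC or Clang):

    #include <stddef.h>
    #include <stdint.h>

    // Append pext(x[i], w[i]) for each word to a packed result bitstream.
    // cw and ro mirror the "current word" / "offset in word" pair above.
    static void compress_bits(const uint64_t* w, const uint64_t* x,
                              uint64_t* r, size_t nw) {
      uint64_t cw = 0; // output word being assembled
      unsigned ro = 0; // bits of cw already filled; kept below 64
      for (size_t i = 0; i < nw; i++) {
        uint64_t v = pext_emu(x[i], w[i]); // this word's kept bits
        unsigned c = (unsigned)__builtin_popcountll(w[i]); // their count
        cw |= v << ro;  // append at the current offset
        ro += c;
        if (ro >= 64) { // cw is full: flush it and keep the spill
          *r++ = cw;
          ro -= 64;
          cw = ro ? v >> (c - ro) : 0; // guard against an undefined 64-bit shift
        }
      }
      if (ro) *r = cw; // final partial word
    }

The guard in the flush branch matters: when the appended bits end exactly at a word boundary there is no spill, and in the c = 64 case the unguarded expression v >> (c - ro) would be a shift by 64, which is undefined in C.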