mask.singeli def renames

more bits of renames

parent b6578e43a1, commit 92f40ddbe2
@@ -237,7 +237,7 @@ For float conversions, the used rounding mode is unspecified.
<!-- -->
- `clz` - count leading zeroes
- `cls` - count leading sign bits
- `copyLane`
- `copy_lane`
- `mla` - multiply-add
- `mls` - multiply-subtract
- `ornot` - a|~b
@@ -249,22 +249,22 @@ For float conversions, the used rounding mode is unspecified.
- `trn1`, `trn2` - 2×2 transposes from pairs of elements across the two arguments

<!--
widenUpper{x:V}, widen{x:V}
widen_upper{x:V}
narrowPair, narrowUpper
-->
## mask.singeli

- `maskOf{TU, n}` - get a mask of type `TU` whose first `n` elements are all `1`s, and the remaining are `0`s. `0≤n≤vcount{TU}`
- `maskNone` - a mask generator that matches all items
- `maskAfter{n}` - a mask generator that patches the first `n` items
- `loadBatch{p:*E, n, V}` - load the `n`-th batch of `vcount{V}` elements from `*E`; if `E` isn't `eltype{V}`, the result is sign- or zero-extended.
- `storeBatch{p:*E, n, x:V, M}` - store equivalent of the `loadBatch`; if `E` isn't `eltype{V}`, `x` is narrowed via `narrow{}`
- `mask_of_first{TU, n}` - get a mask of type `TU` whose first `n` elements are all `1`s, and the remaining are `0`s. `0≤n≤vcount{TU}`
- `mask_none` - a mask generator that matches all items
- `mask_first{n}` - a mask generator that enables the first `n` items
- `load_widen{p:*E, n, [k]W}` - load the `n`-th batch of `k` elements from `p`; if `W` isn't `E`, the result is sign- or zero-extended.
- `store_narrow{p:*E, n, x:[k]N, M}` - store equivalent of the `load_widen`; if `E` isn't `N`, `x` is narrowed via `narrow{}`

To test whether a mask object `M` is `maskNone` or `maskAfter`, `M{0}` can be used - `maskNone{0}` is `0`, but `maskAfter{n}{0}` is `1`.
To test whether a mask object `M` is `mask_none` or `mask_first`, `M{0}` can be used - `mask_none{0}` is `0`, but `mask_first{n}{0}` is `1`.

### Loops

- `@maskedLoop{bulk}` - loop that generates its body twice, once with a `maskNone` mask, and once with a `maskAfter{n}` one to handle the tail.
- `@maskedLoop{bulk}` - loop that generates its body twice, once with a `mask_none` mask, and once with a `mask_first{n}` one to handle the tail.

- `@muLoop{bulk, unr}` - masked & unrolled loop - generates its body three times (or two if `unr==1`) - once for unrolled main loop, once for the unrolling leftover, and once for the masked end.
Unrolling is handled by passing a tuple of indices to process as the index variable (the tail generated bodies get a tuple of the one index)
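As a rough sketch of how the renamed definitions compose (the element type, the pointer `p`, and the batch index below are illustrative assumptions, not part of the library):

```
# illustrative sketch only
def V = [8]i32               # one batch = 8 elements
def M = mask_first{3}        # mask generator selecting the first 3 elements; M{0} is 1
                             # (mask_none{0} is 0, which is how code dispatches on the mask kind)
v:V = load_widen{p, 0, V}    # batch 0 from p, sign-/zero-extended when the element type differs
keep:V = M{v}                # elements 3..7 cleared; needed if v is used for non-store purposes
store_narrow{p, 0, v, M}     # masked store: only the first 3 elements of batch 0 are written
```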
@@ -275,19 +275,19 @@ To test whether a mask object `M` is `maskNone` or `maskAfter`, `M{0}` can be used
Tuples can be used in the iterated variable list for various things:
```
p:*T - regular pointer
tup{VT,p:*T} - loadBatch/storeBatch vector data
tup{VT,p:*T} - load_widen/store_narrow vector data
tup{'b',p:P} - load_bits (bits.singeli)
tup{'b',VT,p:P} - load_expand_bits (bits.singeli)
tup{'g',VT,p:*T} - gives a generator, used as g{} to loadBatch and g{newValue} to storeBatch
tup{'g',VT,p:*T} - gives a generator, used as g{} to load_widen and g{newValue} to store_narrow
tup{'g',p:*T} - the above, but without load support
'm' - get the mask object - either maskNone or some maskAfter{n}
'm' - get the mask object - either mask_none or some mask_first{n}
```

Stores via those will implicitly do a masked store when required.

Loads will load past the end, so `M in 'm'` must be used to mask off elements if they're used for something other than the above stores.

For `muLoop`, `M in 'm'` still gives a single generator, but it's only ever `maskAfter{n}` when there's only one index.
For `muLoop`, `M in 'm'` still gives a single generator, but it's only ever `mask_first{n}` when there's only one index.

Example usage for a loop that adds `u16` and bit boolean elements to a `u32` accumulator, early-exiting on overflow:
```
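Separately from the example referenced above, a rough sketch of the `tup{…}` and `'m'` variable forms; the pointers `xp`/`rp`, the length `n`, and the element type are hypothetical:

```
# illustrative sketch only
sum:[8]i32 = [8]i32**0
@maskedLoop{8}(x in tup{[8]i32, xp}, g in tup{'g',[8]i32, rp}, M in 'm' over n) {
  g{x + [8]i32**1}  # write through the generator; the tail iteration's store is masked implicitly
  sum+= M{x}        # x may contain past-the-end lanes, so mask before using it for anything else
}
```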
@@ -251,7 +251,6 @@ def min = __min
def max = __max
def adds = __adds
def subs = __subs
def sqrt = __sqrt
@@ -220,7 +220,7 @@ def bin_search_vec{prim, T, w:*T, wn, x:*T, xn, rp, maxwn if hasarch{'AVX2'}} =
# Fill with minimum value at the beginning
def pre = if (search) load{w} else (if (up) minvalue else maxvalue){T}
wg := *V~~(w-gap)
wv0:= blend_hom{load{wg}, V**pre, maskOf{V,gap}}
wv0:= blend_hom{load{wg}, V**pre, mask_of_first{V,gap}}
# For multiple lanes, interleave like transpose
def maxstep = lb{maxwn}
def lstep = lb{svl}
@@ -72,7 +72,7 @@ export_tab{'si_blend_arr_scalar', each{blend_arr_scalar, tup{u1, '!', '!', u8, u
next{}
}
while (dstC < dstE) {
storeBatch{dstC, 0, get{}, maskAfter{dstE - dstC}}
store_narrow{dstC, 0, get{}, mask_first{dstE - dstC}}
next{}
}
}}
@@ -25,7 +25,7 @@ fn copy{X, R}(r: *void, x: *void, l:u64, xRaw: *void) : void = {
def ur = tern{hasarch{'AARCH64'}, 4, 1}

if (X==R and R!=u1) {
if (hasarch{'X86_64'} and l<=bulk) storeBatch{rp, 0, loadBatch{xp, 0, RV}, maskAfter{l}}
if (hasarch{'X86_64'} and l<=bulk) store_narrow{rp, 0, load_widen{xp, 0, RV}, mask_first{l}}
else emit{void, 'memcpy', r, x, l*(width{X}/8)}
} else if (R==u64) {
# show{'R==u64', X, R}
@@ -49,7 +49,7 @@ fn copy{X, R}(r: *void, x: *void, l:u64, xRaw: *void) : void = {
def bulk2 = bulk*unr
xi:ux = 0
@for_nz (i to cdiv{l,bulk2}) {
store_bits{bulk2, rp, i, hom_to_int{each{{i} => op{loadBatch{xp, xi+i, XV}}, iota{unr}}}}
store_bits{bulk2, rp, i, hom_to_int{each{{i} => op{load_widen{xp, xi+i, XV}}, iota{unr}}}}
xi+= unr
}
} else if (width{X}<=width{R}) {
@@ -20,7 +20,7 @@ fn equal{W, X}(w:*void, x:*void, l:u64, d:u64) : u1 = {
if (W==u1) {
if (X==u1) { # bitarr ≡ bitarr
def BT = [vw/8]u8
@maskedLoop{vw}(w in *BT~~w, x in *BT~~x, M in 'm' over l) if (anyneBit{w,x,M}) return{0}
@maskedLoop{vw}(w in *BT~~w, x in *BT~~x, M in 'm' over l) if (anyne_bit{w,x,M}) return{0}
} else if (X==f64) { # bitarr ≡ f64arr
def TF = [vw/64]f64
def TU = [vw/64]u64
@@ -35,7 +35,7 @@ fn equal{W, X}(w:*void, x:*void, l:u64, d:u64) : u1 = {
blend_hom{f0, f1, cw}
})
cx:= load{*TF ~~ x, i}
if (anynePositive{wu, cx, M}) return{0}
if (anyne_positive{wu, cx, M}) return{0}
}
} else { # bitarr ≡ i8/i16/i32arr
def T = [bulk]X
@@ -58,9 +58,9 @@ fn equal{W, X}(w:*void, x:*void, l:u64, d:u64) : u1 = {
def R = [bulk]X

@maskedLoopPositive{bulk}(M in 'm' over i to l) {
cw:= loadBatch{*W~~w, i, R}
cx:= loadBatch{*X~~x, i, R}
if (anynePositive{cw,cx,M}) return{0}
cw:= load_widen{*W~~w, i, R}
cx:= load_widen{*X~~x, i, R}
if (anyne_positive{cw,cx,M}) return{0}
}
}
1
@@ -66,7 +66,7 @@ fn fold_assoc_0{T==f64, op if has_simd}(x:*T, len:u64) : T = {
def V = [bulk]T
xv:= *V ~~ x
e:= len / bulk
i:= load{xv, e} & (V~~maskOf{V, len % bulk})
i:= load{xv, e} & (V~~mask_of_first{V, len % bulk})
r:= reduce_pairwise{op, 2, xv, e, i}
if (hasarch{'AARCH64'}) vfold{op, r}
else extract{mix{op, r}, 0}
@@ -1,41 +1,42 @@
local def maskInit1{w} = {
local def bit_mask_init{w} = {
apply{merge, each{{x} => {
merge{(w/8-1)**255, (1<<x)-1, (w/8)**0}
}, iota{8}}}
}
mask256_1:*u8 = maskInit1{256}; def maskOfBit{T,n if width{T}==256} = load{*[32]u8 ~~ (mask256_1 + (n>>3)^31 + 64*(n&7))}
mask128_1:*u8 = maskInit1{128}; def maskOfBit{T,n if width{T}==128} = load{*[16]u8 ~~ (mask128_1 + (n>>3)^15 + 32*(n&7))}
mask256_1:*u8 = bit_mask_init{256}; def mask_of_first_bits{T,n if width{T}==256} = load{*[32]u8 ~~ (mask256_1 + (n>>3)^31 + 64*(n&7))}
mask128_1:*u8 = bit_mask_init{128}; def mask_of_first_bits{T,n if width{T}==128} = load{*[16]u8 ~~ (mask128_1 + (n>>3)^15 + 32*(n&7))}

mask256:*i64 = merge{4 ** -1, 4 ** 0}
local def maskOfImpl{T, n, w} = load{*ty_u{T} ~~ (*u8~~mask256 + 32 - n*(elwidth{T}/8))}
local def mask_of_impl{T, n, w} = load{*ty_u{T} ~~ (*u8~~mask256 + 32 - n*(elwidth{T}/8))}

# get homogeneous mask of first n items; 0 ≤ n ≤ vcount{T}
def maskOf{T,n if w256{T}} = maskOfImpl{T, n, 256}
def maskOf{T,n if w128{T}} = maskOfImpl{T, n, 128}
def maskOf{T,n if w64{T}} = maskOfImpl{T, n, 64}
def mask_of_first{T,n if w256{T}} = mask_of_impl{T, n, 256}
def mask_of_first{T,n if w128{T}} = mask_of_impl{T, n, 128}
def mask_of_first{T,n if w64{T}} = mask_of_impl{T, n, 64}

def anyne{x:T, y:T, M if M{0}==0 and isvec{T}} = ~all_hom{x==y}
def anyne{x:T, y:T, M if M{0}==1 and isvec{T}} = any_hom{M{x!=y}}
def anyne{x:T, y:T, M if M{0}==0 and any_int{x}} = x!=y
def anyne{x:T, y:T, M if M{0}==1 and any_int{x}} = M{x^y} != 0
def anyneBit{x:T, y:T, M} = ~M{x^y, 'all bits zeroes'}
def anyne_bit{x:T, y:T, M} = ~M{x^y, 'all bits zeroes'}

def anynePositive{x:T, y:T, M if M{0}==0} = anyne{x, y, M}
def anynePositive{x:T, y:T, M if M{0}==1 and isvec{T}} = {
def anyne_positive{x:V=[_]_, y:V, M if M{0}==0} = anyne{x, y, M}
def anyne_positive{x:V=[_]_, y:V, M if M{0}==1} = {
def {n,m} = hom_to_int_ext{x==y}
def E = tern{type{m}==u64, u64, u32}
(promote{E,~m} << (width{E}-M{'count'}*n)) != 0
}

def maskNone{x} = x
def maskNone{x, 'all bits zeroes'} = and_bit_none{x, x}
def maskAfter{n} = {
def mask{x:X, 'all bits zeroes'} = and_bit_none{x, X~~maskOfBit{X,n}}
def mask{X, 'to sign bits'} = maskOf{X,n}
def mask{X, 'to homogeneous bits'} = maskOf{X,n}
def mask_none{x} = x
def mask_none{x, 'all bits zeroes'} = and_bit_none{x, x}

def mask_first{n} = {
def mask{x:X, 'all bits zeroes'} = and_bit_none{x, X~~mask_of_first_bits{X,n}}
def mask{X, 'to sign bits'} = mask_of_first{X,n}
def mask{X, 'to homogeneous bits'} = mask_of_first{X,n}
def mask{'count'} = n
def mask{{x}} = tup{mask{x}}
def mask{x:X if isvec{X}} = x & (X~~maskOf{X,n})
def mask{x:X if isvec{X}} = x & (X~~mask_of_first{X,n})
def mask{x:X if any_int{x}} = x & ((1<<n) - 1)
def mask{0} = 1
}
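For reference, a hedged summary of the mask-object protocol these definitions implement, as the rest of the commit uses it; `M` stands for either `mask_none` or some `mask_first{n}`, and `v` is a placeholder value:

```
# illustrative summary only
M{0}                     # 0 for mask_none, 1 for mask_first{n}; used to dispatch on the mask kind
M{v}                     # vector or integer: clear everything past the first n elements/bits
                         # (mask_none returns v unchanged)
M{v, 'all bits zeroes'}  # test that v has no set bits in the selected prefix (anywhere, for mask_none)
M{V, 'to sign bits'}     # materialize the mask as a vector of type V (mask_first only)
M{'count'}               # the prefix length n (mask_first only)
```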
@@ -43,7 +44,7 @@ def maskAfter{n} = {

# store the i-th batch of k elements to ptr, narrowing elements if needed; masked by M
def storeBatch{ptr:*E0, i, x:[k]E1, M} = {
def store_narrow{ptr:*E0, i, x:[k]E1, M} = {
def rpos = ptr + i*k
def TF = re_el{E0, [k]E1}
xu:= narrow{E0, x}
@@ -53,35 +54,35 @@ def storeBatch{ptr:*E0, i, x:[k]E1, M} = {
}

# (sign/zero)-extend the i-th batch of k elements at ptr to [k]E1
def loadBatch{ptr:*E0, i, [k]E1} = {
def load_widen{ptr:*E0, i, [k]E1} = {
def rpos = ptr + i*k
def TF = re_el{E0, [k]E1}
widen{[k]E1, load_low{*TF~~rpos, k*width{E0}}}
}

def loadBatch {ptr:*E, {...ns}, T } = each{loadBatch {ptr, ., T }, ns}
def storeBatch{ptr:*E, {...ns}, xs, M} = each{storeBatch{ptr, ., ., M}, ns, xs}
def load_widen {ptr:*E, {...ns}, T } = each{load_widen {ptr, ., T }, ns}
def store_narrow{ptr:*E, {...ns}, xs, M} = each{store_narrow{ptr, ., ., M}, ns, xs}

# TODO also similar any_hom & use those more
def all_hom{(maskNone), ...xs} = all_hom{...xs}
def all_hom{(mask_none), ...xs} = all_hom{...xs}
def all_hom{M, x:T if kgen{M}} = ~any_hom{M{~x}} # TODO better

# "harmless" pointer cast that'll only cast void*
def hCast{T,p} = assert{0, 'expected pointer with element',T,'or void but got ',p}
def hCast{T,p:*T} = p
def hCast{T,p:(*void)} = *T~~p
local def cast_h{T,p} = assert{0, 'expected pointer with element',T,'or void but got ',p}
local def cast_h{T,p:*T} = p
local def cast_h{T,p:(*void)} = *T~~p

def mlExec{i, iter, vars0, bulk, M} = {
local def ml_exec{i, iter, vars0, bulk, M} = {
def vproc{p:*E} = p
def vproc{'m'} = tptr{{_}=>M, '!'}

def vproc{{T,p:*E}} = tptr{{i} => loadBatch{p, i, T}, {i,x} => storeBatch{p, i, x, M}}
def vproc{{'b', p:*E}} = tptr{{i} => load_bits{bulk, hCast{u64,p}, i}, '!'}
def vproc{{'b',T,p:*E}} = tptr{{i} => load_expand_bits{T, hCast{u64,p}, i}, '!'}
def vproc{{'g', p:*E}} = tptr{{i} => ({x} => storeBatch{p, i, x, M}), '!'}
def vproc{{T,p:*E}} = tptr{{i} => load_widen{p, i, T}, {i,x} => store_narrow{p, i, x, M}}
def vproc{{'b', p:*E}} = tptr{{i} => load_bits{bulk, cast_h{u64,p}, i}, '!'}
def vproc{{'b',T,p:*E}} = tptr{{i} => load_expand_bits{T, cast_h{u64,p}, i}, '!'}
def vproc{{'g', p:*E}} = tptr{{i} => ({x} => store_narrow{p, i, x, M}), '!'}
def vproc{{'g',T,p:*E}} = tptr{{i} => {
def dv{} = loadBatch{p, i, T}
def dv{x} = storeBatch{p, i, x, M}
def dv{} = load_widen{p, i, T}
def dv{x} = store_narrow{p, i, x, M}
}, '!'}

iter{i, each{vproc, vars0}}
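A hedged illustration of the tuple-index overloads above; `p`, `V`, and the amount of data behind `p` are assumptions:

```
# illustrative sketch only
def V = [8]i32
def bs = load_widen{p, tup{0,1}, V}       # a tuple: batches 0 and 1, each widened to V
store_narrow{p, tup{0,1}, bs, mask_none}  # writes both batches back, unmasked
```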
@@ -92,10 +93,10 @@ def maskedLoop{bulk, i0}{vars,begin==0,end,iter} = {
l:u64 = end

m:u64 = l / bulk
@for (i from i0 to m) mlExec{i, iter, vars, bulk, maskNone}
@for (i from i0 to m) ml_exec{i, iter, vars, bulk, mask_none}

left:= l & (bulk-1)
if (left!=0) mlExec{m, iter, vars, bulk, maskAfter{left}}
if (left!=0) ml_exec{m, iter, vars, bulk, mask_first{left}}
}
def maskedLoop{bulk} = maskedLoop{bulk,0}
@@ -104,10 +105,10 @@ def maskedLoopPositive{bulk}{vars,begin==0,end:L,iter} = {
assert{end > 0}
i:L = 0
while(i < (end-1)/bulk) {
mlExec{i, iter, vars, bulk, maskNone}
ml_exec{i, iter, vars, bulk, mask_none}
++i
}
mlExec{i, iter, vars, bulk, maskAfter{end - i*bulk}}
ml_exec{i, iter, vars, bulk, mask_first{end - i*bulk}}
}
@@ -125,30 +126,30 @@ def muLoop{bulk, unr, fromunr}{vars,begin==0,end,iter} = {

m:u64 = l / bulk
if (unr==1) {
@for (i from 0 to m) mlExec{tup{i}, iter, vars, bulk, maskNone}
@for (i from 0 to m) ml_exec{tup{i}, iter, vars, bulk, mask_none}

left:= l & (bulk-1)
if (left!=0) mlExec{tup{m}, iter, vars, bulk, maskAfter{left}}
if (left!=0) ml_exec{tup{m}, iter, vars, bulk, mask_first{left}}
} else {
if (m > 0) {
i:u64 = 0
if (unr <= m) {
while ((i+unr) <= m) {
def is = each{{j}=>i+j, iota{unr}}
mlExec{each{{j}=>i+j, iota{unr}}, iter, vars, bulk, maskNone}
ml_exec{each{{j}=>i+j, iota{unr}}, iter, vars, bulk, mask_none}
i+= unr
}
fromunr{}
}
if (unr==2) {
if (i!=m) mlExec{tup{i}, iter, vars, bulk, maskNone}
if (i!=m) ml_exec{tup{i}, iter, vars, bulk, mask_none}
} else {
@for(j from i to m) mlExec{tup{j}, iter, vars, bulk, maskNone}
@for(j from i to m) ml_exec{tup{j}, iter, vars, bulk, mask_none}
}
}

left:= l & (bulk-1)
if (left!=0) mlExec{tup{m}, iter, vars, bulk, maskAfter{left}}
if (left!=0) ml_exec{tup{m}, iter, vars, bulk, mask_first{left}}
}
}
def muLoop{bulk, unr} = muLoop{bulk, unr, {}=>0}
@@ -142,7 +142,7 @@ def rep_const_shuffle{wv, onreps, xv:*V=[step]T, rv:*V, n:(u64)} = { # onreps{in
}}
setlabel{end}
q := nr & (step-1)
if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, s}
if (q!=0) store_blended_hom{rv+e, mask_of_first{V, q}, s}
}
}
@@ -217,7 +217,7 @@ fn rep_const_shuffle_partial4(wv:u64, ellw:u64, x:*i8, r:*i8, n:u64) : void = {
setlabel{end}

q := (re+step) - r
if (q!=0) store_blended_hom{*V~~r, maskOf{V, q}, s}
if (q!=0) store_blended_hom{*V~~r, mask_of_first{V, q}, s}
}
@@ -373,7 +373,7 @@ def get_boolvec_writer{V, r, nw} = {
def flush{} = {
setlabel{l_flush}
q := nw & (vwords-1)
if (q != 0) store_blended_hom{rv, V~~maskOf{re_el{u64,V}, q}, last_res}
if (q != 0) store_blended_hom{rv, V~~mask_of_first{re_el{u64,V}, q}, last_res}
setlabel{done}
}
tup{output, check_done, flush}
@@ -21,7 +21,7 @@ def scan_loop{init, x:*T, r:*T, len:(u64), scan, scan_last} = {
e:= len/step
@for (xv, rv over e) rv = scan{xv,p}
q:= len & (step-1)
if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, scan_last{load{xv,e}, p}}
if (q!=0) store_blended_hom{rv+e, mask_of_first{V, q}, scan_last{load{xv,e}, p}}
}
def get_scan_last{op, pre} = {
def last{v, p} = op{pre{v}, p}
@@ -58,7 +58,7 @@ fn scan_idem{T, op if hasarch{'X86_64'}}(x:*T, r:*T, len:u64, init:T) : void = {
}
@for (xv, rv over _ from ek*k to e) rv = scan{xv,p}
q:= len & (step-1)
if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, last{load{xv,e}, p}}
if (q!=0) store_blended_hom{rv+e, mask_of_first{V, q}, last{load{xv,e}, p}}
}

export{'si_scan_min_init_i8', scan_idem{i8 , min}}; export{'si_scan_max_init_i8', scan_idem{i8 , max}}
@@ -197,7 +197,7 @@ fn bcs{T if hasarch{'AVX2'}}(x:*u64, r:*T, l:u64) : void = {
if (jv+vl <= l) {
store{p, j, v}
} else {
if (jv < l) store_blended_hom{rv+j, maskOf{V, l - jv}, v}
if (jv < l) store_blended_hom{rv+j, mask_of_first{V, l - jv}, v}
return{}
}
}
@@ -33,7 +33,7 @@ def masked_multistore{r0, vs, M, end} = { # returns bumped-forwards r
r:= r0
def left = if (M{0}) { left:ux = M{'count'} } else 0
def lastMaskedStore = make_opt_branch{M{0}, tup{one_type{vs}}, {c} => {
storeBatch{r, 0, c, maskAfter{left}}
store_narrow{r, 0, c, mask_first{left}}
end{}
}}
@@ -42,7 +42,7 @@ def masked_multistore{r0, vs, M, end} = { # returns bumped-forwards r
if (i+1 == length{vs} or left<k) lastMaskedStore{c}
left-= k
}
storeBatch{r, 0, c, maskNone}
store_narrow{r, 0, c, mask_none}
r+= k
}, inds{vs}, vs}
r
@@ -84,7 +84,7 @@ export{'INDS_BUF_MAX_COPY', ux~~inds_buf_max}
iters:ux = 0
while (*void~~(r + ni) <= r1) {
def rs = G{x}{iv}
masked_multistore{r, rs, maskNone, '!'}
masked_multistore{r, rs, mask_none, '!'}
x+= xbump
r+= rbump
++iters
@@ -66,7 +66,7 @@ def for_special_buffered{r:*T, write_len}{vars,begin,sum,iter} = {
def R = [vc]T
@unroll ((ov/vc)>>0) if (end-buf>vc) { store{*R~~r0, 0, load{*R~~buf}}; r0+=vc; buf+=vc }
assert{bufw % width{R} == 0} # to make sure the below doesn't read out-of-bounds on the stack
store_blended_hom{*R~~r0, maskOf{R, end-buf}, load{*R~~buf}}
store_blended_hom{*R~~r0, mask_of_first{R, end-buf}, load{*R~~buf}}
} else {
@for (r0, buf over u64~~(end-buf)) r0 = buf
}
@@ -216,7 +216,7 @@ fn slash{c, T if hasarch{'AVX2'} and T>=i32}(wp:*u64, x:arg{c,T}, r:*T, l:u64, s
}
def tab = if (tw==32) itab else i64tab
def step{r, w} = {
s:= loadBatch{*u8~~(tab+w), 0, V}
s:= load_widen{*u8~~(tab+w), 0, V}
store{*V~~r, 0, from_ind{s}}
}
@for_special_buffered{r,8} (w in *u8~~wp over sum) {
@@ -20,13 +20,13 @@ def any_nonC32{M, x:T=[_]_ if hasarch{'X86_64'}} = {
def ne = H~~x != H**cast_i{u32, cbqn_c32Tag{}<<16}
any_top{M{T~~ne}}
}
def any_nonC32{(maskNone), x:[k](u64), y:[k](u64)} = {
def any_nonC32{(mask_none), x:[k](u64), y:[k](u64)} = {
def T32 = [k*2]u32
def hi = shuf_ind{T32~~x, T32~~y, match(k) {
{2} => tup{1,3,5,7}
{4} => tup{1,3,9,11,5,7,13,15} # all odd indices, in the order that vshufps can handle
}}
anyne{hi, T32**cast_i{u32, cbqn_c32Tag{}<<16}, maskNone}
anyne{hi, T32**cast_i{u32, cbqn_c32Tag{}<<16}, mask_none}
}

fn squeeze{vw, X, CHR, B}(x0:*void, len:ux) : u32 = {
@@ -239,7 +239,7 @@ def uninterleave{r0:*T, r1:*T, xp:*T, n if has_simd and (not hasarch{'X86_64'} o
xb := xv + 2*nv
x0 := load{xb}
x1 := V**0; if (n&(l/2) != 0) x1 = load{xb, 1}
mask := maskOf{V, n%l}
mask := mask_of_first{V, n%l}
each{store_blended_hom{., mask, .}, tup{rv0+nv,rv1+nv}, uz{x0,x1}}
}
}
@@ -258,7 +258,7 @@ fn interleave{T if has_simd}(r:*void, x0:*void, x1:*void, n:u64) : void = {
r0 := get_r{0}
rb := rv + 2*nv
nr := 2*n; m := nr%l
mask := maskOf{V, m}
mask := mask_of_first{V, m}
if (nr&l == 0) {
store_blended_hom{rb, mask, r0}
} else {