more Singeli mask op renames

parent 6f8b08bb1f
commit ad79ef71cf
@@ -188,10 +188,10 @@ For unaligned scalar loads & stores, `loadu` & `storeu` should be used.
 - `store{p:*V, a:V} : void` - store full vector
 - `loadLow{p:*V, w} : V` - load to low `w` bits
 - `storeLow{p:*E, w, a:[n]E}` - store low `w` bits
-- `homMaskStore{p:*V, m:mt{V}, a:V}` - conditionally store elements based on mask; won't touch masked-off elements
-- `topMaskStore{p:*V, m:V, a:V}` - conditionally store elements based on top bit of `m`; won't touch masked-off elements
-- `homMaskStoreF` - `homMaskStore` but may touch masked-off elements and thus be supported on more types
-- `topMaskStoreF` - `topMaskStore` but may touch masked-off elements and thus be supported on more types
+- `store_masked_hom{p:*V, m:mt{V}, a:V}` - conditionally store elements based on mask; won't touch masked-off elements
+- `store_masked_top{p:*V, m:V, a:V}` - conditionally store elements based on top bit of `m`; won't touch masked-off elements
+- `store_blended_hom` - `store_masked_hom` but may touch masked-off elements and thus be supported on more types
+- `store_blended_top` - `store_masked_top` but may touch masked-off elements and thus be supported on more types
 
 <!-- useless x86 defs for vector-width-aligned load/store: loada storea -->
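The renames split the old scheme along its real semantic line: `store_masked_*` never writes masked-off elements, while `store_blended_*` (the old `F` variants) may rewrite them with their current values via load/blend/store, which is what lets them cover element types without a native masked store. The call sites changed below all follow the same tail-handling shape; a minimal sketch, assuming `maskOf{V, n}` builds a homogeneous mask of the first `n` lanes as in the changed sources, with `tailv` a hypothetical leftover vector:

```
# after the main loop handles e whole vectors, store the q leftover
# elements through a first-q-lanes mask; the blended form may also
# rewrite the remaining lanes with their existing values
q := len & (step-1)
if (q != 0) store_blended_hom{rv+e, maskOf{V, q}, tailv}  # tailv: hypothetical
```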
@@ -24,13 +24,13 @@ def sel{L, x:T, i:I if w256{T} and lvec{L,8,32} and w256{I,32}} = vec_shuffle{L,
 def sel{L, x:T, i:I if w256{T} and lvec{L,16,8} and w256{I, 8}} = vec_shuffle{L, x, i}
 
 # masked store; F variants may not be a single instruction
-def topMaskStore{p:*T, m:M, v:T if w256i{M, 32} and w256{T,elwidth{M}}} = emit{void, '_mm256_maskstore_epi32', *i32~~p, m, [8]i32~~v}
-def topMaskStore{p:*T, m:M, v:T if w256i{M, 64} and w256{T,elwidth{M}}} = emit{void, '_mm256_maskstore_pd', *f64~~p, m, [4]f64~~v}
-def homMaskStore{p:*T, m:M, v:T if w256i{M} and w256{T,elwidth{M}}} = topMaskStore{p, m, v}
+def store_masked_top{p:*T, m:M, v:T if w256i{M, 32} and w256{T,elwidth{M}}} = emit{void, '_mm256_maskstore_epi32', *i32~~p, m, [8]i32~~v}
+def store_masked_top{p:*T, m:M, v:T if w256i{M, 64} and w256{T,elwidth{M}}} = emit{void, '_mm256_maskstore_pd', *f64~~p, m, [4]f64~~v}
+def store_masked_hom{p:*T, m:M, v:T if w256i{M} and w256{T,elwidth{M}}} = store_masked_top{p, m, v}
 
-def topMaskStoreF{p:*T, m:M, v:T if w256i{M} and elwidth{T}>=32} = topMaskStore{p,m,v}
-def homMaskStoreF{p:*T, m:M, v:T if w256i{M} and elwidth{T}>=32} = topMaskStore{p,m,v}
-def homMaskStoreF{p:*T, m:M, v:T if w256i{M} and elwidth{T}<=16 and w256{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
+def store_blended_top{p:*T, m:M, v:T if w256i{M} and elwidth{T}>=32} = store_masked_top{p,m,v}
+def store_blended_hom{p:*T, m:M, v:T if w256i{M} and elwidth{T}>=32} = store_masked_top{p,m,v}
+def store_blended_hom{p:*T, m:M, v:T if w256i{M} and elwidth{T}<=16 and w256{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
 
 # mask stuff
 def top_to_int{x:T if w256{T, 8}} = emit{u32, '_mm256_movemask_epi8', x}
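AVX2 only has true masked stores at 32- and 64-bit granularity (`_mm256_maskstore_epi32`/`_mm256_maskstore_pd`), so `store_masked_*` above is restricted to those element widths, and 8/16-bit types get only the blended form. A hedged restatement of that `elwidth{T}<=16` fallback, under a hypothetical name, just to spell out why it touches masked-off bytes:

```
# read-modify-write: the final store is full-width, so lanes where m is
# clear are written back with their old contents rather than skipped
def store_blended_hom_sketch{p:*T, m:M, v:T} = {
  old := load{p}                     # current destination vector
  store{p, 0, blend_hom{old, v, m}}  # v where m set, old elsewhere
}
```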
@@ -26,15 +26,15 @@ def reinterpret{T, a:M=[l==width{T}](u1)} = {
   re_mask{M, {l,w,W} => cast_i{T, emit{W, merge{'_cvtmask',l,'_u',w}, a}}}
 }
 
-def maskStore{p:*V, m:[l](u1), v:V=[l]_ if has512e{V}} = {
+def store_masked_hom{p:*V, m:[l](u1), v:V=[l]_ if has512e{V}} = {
   emit{void, merge{pref{V}, 'mask_storeu_', suf{V}}, p, m, v}
 }
 
-def topMaskReg{x:V=[k]_} = emit{[k]u1, merge{pref{V},'mov',suf{V},'_mask'}, x}
-def top_to_int{x:V=[k]_ if 512==width{V}} = ty_u{k}~~topMaskReg{x}
+def top_to_mask{x:V=[k]_} = emit{[k]u1, merge{pref{V},'mov',suf{V},'_mask'}, x}
+def top_to_int{x:V=[k]_ if 512==width{V}} = ty_u{k}~~top_to_mask{x}
 def hom_to_int{x:V=[_]_ if 512==width{V}} = top_to_int{x}
 
-def maskToHom{V=[l]_, x:[l](u1)} = emit{V, merge{pref{V},'movm_',suf{V}}, x}
+def mask_to_hom{V=[l]_, x:[l](u1)} = emit{V, merge{pref{V},'movm_',suf{V}}, x}
 
 def sel{(ty_u{V}), x:V=[_]E, i:I==(ty_u{V}) if (if (width{E}>8) has512e{V} else has512{V, 'VBMI'})} = emit{V, merge{pref{V}, 'permutexvar_', suf{V}}, i, x}
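On AVX-512 the conversions now all read as `source_to_target`: `top_to_mask` moves vector sign bits into a `[k]u1` mask register, `mask_to_hom` expands a mask register back to an all-0s/all-1s lane vector, and `top_to_int`/`hom_to_int` go on to scalar integers. A minimal sketch composing two of them (hypothetical definition, not part of this commit):

```
# homogeneous mask vector from sign bits: vector -> k1 register -> vector
def top_to_hom_sketch{x:V=[_]_} = mask_to_hom{V, top_to_mask{x}}
```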
@@ -173,9 +173,9 @@ def lvec = match { {[n]T, n, (width{T})} => 1; {T, n, w} => 0 }
 def {
   absu,andAllZero,andnz,b_getBatch,blend,blend_units,clmul,cvt,extract,fold_addw,half,
   all_bit,any_bit,blend_bit,
-  all_hom,any_hom,blend_hom,hom_to_int,homMaskStore,homMaskStoreF,
-  all_top,any_top,blend_top,top_to_int,topMaskStore,topMaskStoreF,
-  loadBatchBit,loadLow,make,maskStore,maskToHom,mulw_split,mulh,narrow,narrow_trunc,narrow_pair,
+  all_hom,any_hom,blend_hom,hom_to_int,store_masked_hom,store_blended_hom,
+  all_top,any_top,blend_top,top_to_int,store_masked_top,store_blended_top,
+  loadBatchBit,loadLow,make,mask_to_hom,mulw_split,mulh,narrow,narrow_trunc,narrow_pair,
   packQ,pair,pdep,pext,popcRand,rbit,sel,shl,shr,shufInd,storeLow,
   unord,unzip,vfold,vec_select,vec_shuffle,widen,widen_upper,multishift,
 }
@@ -50,7 +50,7 @@ def storeBatch{ptr:*E0, i, x:[k]E1, M} = {
   def TF = re_el{E0, [k]E1}
   xu:= narrow{E0, x}
 
-  if (M{0}) homMaskStoreF{*TF~~rpos, M{TF, 'to homogeneous bits'}, undefPromote{TF, xu}}
+  if (M{0}) store_blended_hom{*TF~~rpos, M{TF, 'to homogeneous bits'}, undefPromote{TF, xu}}
   else storeLow{rpos, k*width{E0}, xu}
 }
@@ -175,4 +175,4 @@ def hom_to_int_ext{a:T=[k]E if E!=u64} = {
 }
 
 
-def homMaskStoreF{p:*T, m:M, v:T if nveci{M} and nvec{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
+def store_blended_hom{p:*T, m:M, v:T if nveci{M} and nvec{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
@@ -142,7 +142,7 @@ def rep_const_shuffle{wv, onreps, xv:*V=[step]T, rv:*V, n:(u64)} = { # onreps{in
     }}
     setlabel{end}
     q := nr & (step-1)
-    if (q!=0) homMaskStoreF{rv+e, maskOf{V, q}, s}
+    if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, s}
   }
 }
@@ -217,7 +217,7 @@ fn rep_const_shuffle_partial4(wv:u64, ellw:u64, x:*i8, r:*i8, n:u64) : void = {
   setlabel{end}
 
   q := (re+step) - r
-  if (q!=0) homMaskStoreF{*V~~r, maskOf{V, q}, s}
+  if (q!=0) store_blended_hom{*V~~r, maskOf{V, q}, s}
 }
 
@@ -373,7 +373,7 @@ def get_boolvec_writer{V, r, nw} = {
   def flush{} = {
     setlabel{l_flush}
     q := nw & (vwords-1)
-    if (q != 0) homMaskStoreF{rv, V~~maskOf{re_el{u64,V}, q}, last_res}
+    if (q != 0) store_blended_hom{rv, V~~maskOf{re_el{u64,V}, q}, last_res}
     setlabel{done}
   }
   tup{output, check_done, flush}
@@ -21,7 +21,7 @@ def scan_loop{init, x:*T, r:*T, len:(u64), scan, scan_last} = {
   e:= len/step
   @for (xv, rv over e) rv = scan{xv,p}
   q:= len & (step-1)
-  if (q!=0) homMaskStoreF{rv+e, maskOf{V, q}, scan_last{load{xv,e}, p}}
+  if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, scan_last{load{xv,e}, p}}
 }
 def get_scan_last{op, pre} = {
   def last{v, p} = op{pre{v}, p}
@@ -58,7 +58,7 @@ fn scan_idem{T, op if hasarch{'X86_64'}}(x:*T, r:*T, len:u64, init:T) : void = {
   }
   @for (xv, rv over _ from ek*k to e) rv = scan{xv,p}
   q:= len & (step-1)
-  if (q!=0) homMaskStoreF{rv+e, maskOf{V, q}, last{load{xv,e}, p}}
+  if (q!=0) store_blended_hom{rv+e, maskOf{V, q}, last{load{xv,e}, p}}
 }
 
 export{'si_scan_min_init_i8', scan_idem{i8 , min}}; export{'si_scan_max_init_i8', scan_idem{i8 , max}}
@@ -131,11 +131,11 @@ fn scan_neq{if hasarch{'AVX512BW', 'VPCLMULQDQ', 'GFNI'}}(init:u64, x:*u64, r:*u
     hb := sse{top_to_int{[64]u8~~x8}}
     xh := exor64{hb} # Exclusive xor of high bits (xh ^ hb for inclusive)
     xc := xh ^ carry
-    v := x8 ^ V~~maskToHom{[64]u8, [64]u1~~extract{xc,0}}
+    v := x8 ^ V~~mask_to_hom{[64]u8, [64]u1~~extract{xc,0}}
     carry = (xc ^ hb) ^ shuf{u64, xh, 1,1}
     rem:= nw - 8*i
     if (rem < 8) {
-      maskStore{*V~~r+i, [8]u1~~(~(u8~~0xff<<rem)), v}
+      store_masked_hom{*V~~r+i, [8]u1~~(~(u8~~0xff<<rem)), v}
       return{}
     }
     rv = v
@@ -197,7 +197,7 @@ fn bcs{T if hasarch{'AVX2'}}(x:*u64, r:*T, l:u64) : void = {
     if (jv+vl <= l) {
       store{p, j, v}
     } else {
-      if (jv < l) homMaskStoreF{rv+j, maskOf{V, l - jv}, v}
+      if (jv < l) store_blended_hom{rv+j, maskOf{V, l - jv}, v}
       return{}
     }
   }
@@ -66,7 +66,7 @@ def for_special_buffered{r:*T, write_len}{vars,begin,sum,iter} = {
     def R = [vc]T
     @unroll ((ov/vc)>>0) if (end-buf>vc) { store{*R~~r0, 0, load{*R~~buf}}; r0+=vc; buf+=vc }
     assert{bufw % width{R} == 0} # to make sure the below doesn't read out-of-bounds on the stack
-    homMaskStoreF{*R~~r0, maskOf{R, end-buf}, load{*R~~buf}}
+    store_blended_hom{*R~~r0, maskOf{R, end-buf}, load{*R~~buf}}
   } else {
     @for (r0, buf over u64~~(end-buf)) r0 = buf
   }
@@ -255,7 +255,7 @@ fn slash{c, T if hasarch{if (width{T}>=32) 'AVX512F' else 'AVX512VBMI2'}}(w:*u64
     cs := cast_i{I,promote{i64,1}<<(c%64) - 1}
     if (vl==64) cs -= cast_i{I,c}>>6
     v := emitT{V, 'mask_compress', x, m, x}
-    maskStore{*V~~r, [vl]u1~~cs, v}
+    store_masked_hom{*V~~r, [vl]u1~~cs, v}
     r += c
   }
 }
@@ -67,7 +67,7 @@ def packQ{a:T,b:T if w128i{T}} = packs{a,b}
 def x86_shufps_range{is, hi} = inrange{is,0,2, 0,4} and inrange{is,2,4, hi,hi+4}
 def shufInd{a:T, b:T=[4]E, {...is} if width{E}==32 and length{is}==4 and x86_shufps_range{is, 4}} = vec_shuffle{[4]f32, tup{a, b}, is&3}
 
-def homMaskStoreF{p:*T, m:M, v:T if w128i{M} and w128{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
+def store_blended_hom{p:*T, m:M, v:T if w128i{M} and w128{T,elwidth{M}}} = store{p, 0, blend_hom{load{p}, v, m}}
 
 def widen{T, x:X if w128i{T} and w128i{X} and w128s{T}==w128s{X} and elwidth{T}>elwidth{X}} = {
   def s{v} = s{mzip{v,v,0}}
@@ -240,7 +240,7 @@ def uninterleave{r0:*T, r1:*T, xp:*T, n if has_simd and (not hasarch{'X86_64'} o
     x0 := load{xb}
     x1 := V**0; if (n&(l/2) != 0) x1 = load{xb, 1}
     mask := maskOf{V, n%l}
-    each{homMaskStoreF{., mask, .}, tup{rv0+nv,rv1+nv}, uz{x0,x1}}
+    each{store_blended_hom{., mask, .}, tup{rv0+nv,rv1+nv}, uz{x0,x1}}
   }
 }
 fn interleave{T if has_simd}(r:*void, x0:*void, x1:*void, n:u64) : void = {
@@ -260,10 +260,10 @@ fn interleave{T if has_simd}(r:*void, x0:*void, x1:*void, n:u64) : void = {
     nr := 2*n; m := nr%l
     mask := maskOf{V, m}
     if (nr&l == 0) {
-      homMaskStoreF{rb, mask, r0}
+      store_blended_hom{rb, mask, r0}
     } else {
       store{rb, 0, r0}
-      if (m > 0) homMaskStoreF{rb+1, mask, get_r{1}}
+      if (m > 0) store_blended_hom{rb+1, mask, get_r{1}}
     }
   }
 }