use ~~ more
parent dd979e172f
commit 7cd19c2840
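In short: the diff below replaces the helper casts cast_v (vector reinterpret), cast_p (pointer reinterpret) and cast_vp (pointer-to-vector) with the infix ~~ reinterpret operator introduced in this commit, and drops the per-width cast_v/cast_vp definitions that are no longer needed. A condensed before/after sketch of the recurring pattern, built only from lines that appear in the hunks below:

# before: helper-based casts
def v2d{x:T & w256{T}} = cast_v{[4]f64, x}
ws:= cast_p{u64, wr}
# after: one reinterpret operator for vectors and pointers alike
def v2d{x:T & w256{T}} = [4]f64 ~~ x
ws:= *u64~~wr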
@@ -19,31 +19,27 @@ def w256u = wgen256{{T} => isint{T} & isunsigned{T}}
def w256f = wgen256{{T} => isfloat{T}}

def cast_vp{T, x & w256{T}} = emit{*T, '(void*)', x}
def cast_v{R, x:S & w256i{R } & w256{S}} = emit{R, '(__m256i)', x}
def cast_v{R, x:S & w256f{R,32} & w256{S}} = emit{R, '(__m256)', x}
def cast_v{R, x:S & w256f{R,64} & w256{S}} = emit{R, '(__m256d)', x}
def v2i{x:T & w256{T}} = cast_v{[32]u8, x} # for compact casting for the annoying intrinsic type system
def v2f{x:T & w256{T}} = cast_v{[8]f32, x}
def v2d{x:T & w256{T}} = cast_v{[4]f64, x}
def v2i{x:T & w256{T}} = [32]u8 ~~ x # for compact casting for the annoying intrinsic type system
def v2f{x:T & w256{T}} = [8]f32 ~~ x
def v2d{x:T & w256{T}} = [4]f64 ~~ x

def to_el{E, T & isvec{T}} = [width{T}/width{E}]E
def to_el{E, x:T} = cast_v{to_el{E,T}, x}
def forv{T & w256{T}} = forc{{v}=>cast_vp{T,v}}
def to_el{E, x:T} = to_el{E,T} ~~ x
def forv{T & w256{T}} = forc{{v} => *T ~~ v}

# load & store
def load {a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_loadu_si256', a+n}
def loada{a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_load_si256', a+n}
def load {a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_loadu_pd', cast_p{f64, a+n}}
def loada{a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_load_pd', cast_p{f64, a+n}}
def load {a:T, n & w256f{eltype{T},32}} = emit{eltype{T}, '_mm256_loadu_ps', cast_p{f32, a+n}}
def loada{a:T, n & w256f{eltype{T},32}} = emit{eltype{T}, '_mm256_load_ps', cast_p{f32, a+n}}
def store {a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_storeu_si256', a+n, v}
def storea{a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_store_si256', a+n, v}
def store {a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_storeu_pd', cast_p{f64, a+n}, v}
def storea{a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_store_pd', cast_p{f64, a+n}, v}
def store {a:T, n, v & w256f{eltype{T},32}} = emit{void, '_mm256_storeu_ps', cast_p{f32, a+n}, v}
def storea{a:T, n, v & w256f{eltype{T},32}} = emit{void, '_mm256_store_ps', cast_p{f32, a+n}, v}
def load {a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_loadu_si256', a+n}
def loada{a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_load_si256', a+n}
def load {a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_loadu_pd', *f64 ~~ (a+n)}
def loada{a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_load_pd', *f64 ~~ (a+n)}
def load {a:T, n & w256f{eltype{T},32}} = emit{eltype{T}, '_mm256_loadu_ps', *f32 ~~ (a+n)}
def loada{a:T, n & w256f{eltype{T},32}} = emit{eltype{T}, '_mm256_load_ps', *f32 ~~ (a+n)}
def store {a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_storeu_si256', a+n, v}
def storea{a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_store_si256', a+n, v}
def store {a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_storeu_pd', *f64 ~~ (a+n), v}
def storea{a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_store_pd', *f64 ~~ (a+n), v}
def store {a:T, n, v & w256f{eltype{T},32}} = emit{void, '_mm256_storeu_ps', *f32 ~~ (a+n), v}
def storea{a:T, n, v & w256f{eltype{T},32}} = emit{void, '_mm256_store_ps', *f32 ~~ (a+n), v}

# broadcast
def broadcast{T, v & w256i{T, 8}} = emit{T, '_mm256_set1_epi8', ext{eltype{T},v}}
@@ -68,14 +64,14 @@ def iota{T & w256{T,32}} = make{T,0,1,2,3,4,5,6,7}
def iota{T & w256{T,16}} = make{T,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
def iota{T & w256{T,8}} = make{T,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}

def __xor{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_xor_ps', v2f{a}, v2f{b}}}
def __and{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_and_ps', v2f{a}, v2f{b}}}
def __or {a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_or_ps', v2f{a}, v2f{b}}}
def __xor{a:T, b:T & w256i{T}} = T ~~ emit{[8]f32, '_mm256_xor_ps', v2f{a}, v2f{b}}
def __and{a:T, b:T & w256i{T}} = T ~~ emit{[8]f32, '_mm256_and_ps', v2f{a}, v2f{b}}
def __or {a:T, b:T & w256i{T}} = T ~~ emit{[8]f32, '_mm256_or_ps', v2f{a}, v2f{b}}

def __not{a:T & w256u{T}} = a ^ broadcast{T, ~cast{eltype{T},0}}

# f64 comparison
def f64cmpAVX{a,b,n} = cast_v{[4]u64, emit{[4]f64, '_mm256_cmp_pd', a, b, n}}
def f64cmpAVX{a,b,n} = [4]u64 ~~ emit{[4]f64, '_mm256_cmp_pd', a, b, n}
def __eq{a:T,b:T & T==[4]f64} = f64cmpAVX{a,b, 0}
def __ne{a:T,b:T & T==[4]f64} = f64cmpAVX{a,b, 4}
def __gt{a:T,b:T & T==[4]f64} = f64cmpAVX{a,b,30}
@@ -126,12 +122,12 @@ def insert{x:T, i, v & w256i{T,32} & knum{i}} = emit{T, '_mm256_insert_epi32', x
def insert{x:T, i, v & w256i{T,64} & knum{i}} = emit{T, '_mm256_insert_epi64', x, v, i}

# blend by sign bit
def blend{f:T, t:T, m:M & w256{T} & w256i{M,32}} = cast_v{T, emit{[8]f32, '_mm256_blendv_ps', v2f{f}, v2f{t}, v2f{m}}}
def blend{f:T, t:T, m:M & w256{T} & w256i{M,64}} = cast_v{T, emit{[4]f64, '_mm256_blendv_pd', v2d{f}, v2d{t}, v2d{m}}}
def blend{f:T, t:T, m:M & w256{T} & w256i{M,32}} = T ~~ emit{[8]f32, '_mm256_blendv_ps', v2f{f}, v2f{t}, v2f{m}}
def blend{f:T, t:T, m:M & w256{T} & w256i{M,64}} = T ~~ emit{[4]f64, '_mm256_blendv_pd', v2d{f}, v2d{t}, v2d{m}}

# mixed-width operations
def half{x:T, i & w256{T} & knum{i}} = cast_v{[vcount{T}/2](eltype{T}), emit{[8]i16, '_mm256_extracti128_si256', v2i{x}, i}}
def pair{a:T,b:T & width{T}==128} = cast_v{[vcount{T}*2](eltype{T}), emit{[8]i32, '_mm256_setr_m128i', a, b}}
def half{x:T, i & w256{T} & knum{i}} = [vcount{T}/2](eltype{T}) ~~ emit{[8]i16, '_mm256_extracti128_si256', v2i{x}, i}
def pair{a:T,b:T & width{T}==128} = [vcount{T}*2](eltype{T}) ~~ emit{[8]i32, '_mm256_setr_m128i', a, b}
def pair{x} = pair{tupsel{0,x},tupsel{1,x}}

# mask stuff
@@ -1,6 +1,6 @@
# maskstore
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 32} & w256i{M, 32}} = emit{void, '_mm256_maskstore_epi32', cast_p{i32, a+n}, m, v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 64} & w256i{M, 64}} = emit{void, '_mm256_maskstore_pd', cast_p{f64, a+n}, m, v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 32} & w256i{M, 32}} = emit{void, '_mm256_maskstore_epi32', *i32 ~~ (a+n), m, v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 64} & w256i{M, 64}} = emit{void, '_mm256_maskstore_pd', *f64 ~~ (a+n), m, v}

# maskstore with all cases defined, at the cost of not being a single instruction
def maskstoreF{p, m, n, x:T} = store{p, n, blendF{load{p,n}, x, m}}
@@ -108,26 +108,26 @@ def __subs{a:T,b:T & T==[32]u8 } = emit{T, '_mm256_subs_epu8', a, b}

# structural operations
def shl{S==[16]u8, x:T, n & w256{T}} = cast_v{T, emit{T, '_mm256_bslli_epi128', x, n}}
def shr{S==[16]u8, x:T, n & w256{T}} = cast_v{T, emit{T, '_mm256_bsrli_epi128', x, n}}
def shl{S==[16]u8, x:T, n & w256{T}} = T ~~ emit{T, '_mm256_bslli_epi128', x, n}
def shr{S==[16]u8, x:T, n & w256{T}} = T ~~ emit{T, '_mm256_bsrli_epi128', x, n}

def blend{I==[8]u16, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[16]i16, '_mm256_blend_epi16', v2i{a}, v2i{b}, m}}
def blend{I==[8]u32, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[ 8]i32, '_mm256_blend_epi32', v2i{a}, v2i{b}, m}}
def blend{I==[4]u64, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[ 4]f64, '_mm256_blend_pd', v2d{a}, v2d{b}, m}}
def blend{I==[8]u16, a:T, b:T, m & w256{T} & knum{m}} = T ~~ emit{[16]i16, '_mm256_blend_epi16', v2i{a}, v2i{b}, m}
def blend{I==[8]u32, a:T, b:T, m & w256{T} & knum{m}} = T ~~ emit{[ 8]i32, '_mm256_blend_epi32', v2i{a}, v2i{b}, m}
def blend{I==[4]u64, a:T, b:T, m & w256{T} & knum{m}} = T ~~ emit{[ 4]f64, '_mm256_blend_pd', v2d{a}, v2d{b}, m}

# blend by sign bit; still no 16-bit case
def blend{f:T, t:T, m:M & w256{T} & w256i{M, 8}} = cast_v{T, emit{[32]i8, '_mm256_blendv_epi8', v2i{f}, v2i{t}, v2i{m}}}
def blend{f:T, t:T, m:M & w256{T} & w256i{M, 8}} = T ~~ emit{[32]i8, '_mm256_blendv_epi8', v2i{f}, v2i{t}, v2i{m}}

# assumes all bits are the same in each mask item
def blendF{f:T, t:T, m:M & w256{T} & w256{M} & width{eltype{M}}!=16} = blend{f, t, m}
def blendF{f:T, t:T, m:M & w256{T} & w256{M,16}} = blend{f, t, cast_v{[32]i8,m}}
def blendF{f:T, t:T, m:M & w256{T} & w256{M,16}} = blend{f, t, [32]i8 ~~ m}

def shuf{I==[4]u32, x:T, n & w256{T} & knum{n}} = cast_v{T, emit{[8]i32, '_mm256_shuffle_epi32', v2i{x}, n}}
def shuf{I==[4]u64, x:T, n & w256{T} & knum{n}} = cast_v{T, emit{[4]f64, '_mm256_permute4x64_pd', v2d{x}, n}}
def shufHalves{x:T, y:T, n & w256{T} & knum{n}} = T~~emit{[4]i64, '_mm256_permute2x128_si256', v2i{x}, v2i{y}, n}
def shuf{I==[4]u32, x:T, n & w256{T} & knum{n}} = T ~~ emit{[8]i32, '_mm256_shuffle_epi32', v2i{x}, n}
def shuf{I==[4]u64, x:T, n & w256{T} & knum{n}} = T ~~ emit{[4]f64, '_mm256_permute4x64_pd', v2d{x}, n}
def shufHalves{x:T, y:T, n & w256{T} & knum{n}} = T ~~ emit{[4]i64, '_mm256_permute2x128_si256', v2i{x}, v2i{y}, n}

def sel{I, x:T, i:[8]i32 & w256{I,32}} = cast_v{T, emit{[32]u8, '_mm256_permutevar8x32_epi32', v2i{x}, i}}
def sel{I, x:T, i:[32]i8 & w128{I, 8}} = cast_v{T, emit{[32]u8, '_mm256_shuffle_epi8', v2i{x}, i}}
def sel{I, x:T, i:[8]i32 & w256{I,32}} = T ~~ emit{[32]u8, '_mm256_permutevar8x32_epi32', v2i{x}, i}
def sel{I, x:T, i:[32]i8 & w128{I, 8}} = T ~~ emit{[32]u8, '_mm256_shuffle_epi8', v2i{x}, i}

def extract{x:T, i & w256i{T,8 } & knum{i}} = emit{eltype{T}, '_mm256_extract_epi8', x, i}
def extract{x:T, i & w256i{T,16} & knum{i}} = emit{eltype{T}, '_mm256_extract_epi16', x, i}
@@ -138,9 +138,9 @@ def getmask{x:T & w256{T, 16}} = {
msk:u32 = getmask{emit{[32]u8, '_mm256_packs_epi16', x, broadcast{[16]u16, 0}}}
(msk&255) | (msk>>8)
}
def any{x:T & w256i{T, 16}} = getmask{cast_v{[32]u8,x}}!=0
def all{x:T & w256i{T, 16}} = getmask{cast_v{[32]u8,x}} == (1<<32)-1
def anyneg{x:T & w256s{T, 16}} = getmask{cast_v{[32]u8, cast_v{[16]i16,x} < broadcast{[16]i16, 0}}}!=0
def any{x:T & w256i{T, 16}} = getmask{[32]u8~~x}!=0
def all{x:T & w256i{T, 16}} = getmask{[32]u8~~x} == (1<<32)-1
def anyneg{x:T & w256s{T, 16}} = getmask{[32]u8 ~~ ([16]i16~~x < broadcast{[16]i16, 0})}!=0

# conversion
# convert packed elements of type F to a result T
@@ -3,6 +3,7 @@ include 'arch/c'
def Size = u64
oper infix right ~~ reinterpret 55

def load{x} = *x
# TODO move these to a more base file
def knum{x} = match{'number',kind{x}}
def trunc{T, x:U & isint{T} & isint{U} & T<=U} = emit{T, '', x}
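The hunk above adds the operator itself: ~~ is declared as an infix operator (right-associative, precedence 55) bound to reinterpret, so T ~~ x reinterprets x as type T and *T ~~ p retypes a pointer. A small illustrative sketch, with lines taken from hunks elsewhere in this diff:

# retype a *u64 as a byte pointer before indexing into it
x8:= *u8 ~~ x
# reinterpret a 256-bit vector as 32 bytes
def v2i{x:T & w256{T}} = [32]u8 ~~ x
# pointer arithmetic is parenthesized before the reinterpret
def load {a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_loadu_pd', *f64 ~~ (a+n)}
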
@@ -24,7 +24,7 @@ def b_setBatch{sz, x:*u64, n:(Size), v} = { vc:u64 = ext{u64,v}
}

def b_setBatch{sz, x:*u64, n:(Size), v & sz==4} = {
x8:= cast_p{u8, x}
x8:= *u8 ~~ x

#w:u64 = cast_i{u64, load{x8,n/2}}
#sh:u64 = (n&1) * 4
@@ -42,7 +42,7 @@ def b_setBatch{sz, x:*u64, n:(Size), v & sz==4} = {

store{x8, n/2, cast_i{u8,w}}
}
def b_setBatch{sz, x:*u64, n:(Size), v & sz== 8} = store{cast_p{u8, x}, n, cast_i{u8, v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==16} = store{cast_p{u16, x}, n, cast_i{u16,v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==32} = store{cast_p{u32, x}, n, cast_i{u32,v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==64} = store{x, n, cast_i{u64,v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz== 8} = store{*u8 ~~ x, n, cast_i{u8, v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==16} = store{*u16 ~~ x, n, cast_i{u16,v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==32} = store{*u32 ~~ x, n, cast_i{u32,v}}
def b_setBatch{sz, x:*u64, n:(Size), v & sz==64} = store{ x, n, cast_i{u64,v}}
@@ -83,20 +83,20 @@ def any2bit{VT, unr, op, wS, wV, xS, xV, dst:*u64, len:(Size)} = {
}
}
aa2bit{VT, unr, op}(dst:*u64, wr:*u8, xr:*u8, len:Size) : void = {
wv:= cast_vp{VT, wr}; ws:= cast_p{eltype{VT}, wr}
xv:= cast_vp{VT, xr}; xs:= cast_p{eltype{VT}, xr}
wv:= *VT ~~ wr; ws:= *eltype{VT} ~~ wr
xv:= *VT ~~ xr; xs:= *eltype{VT} ~~ xr
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>load{wv,i}, {i}=>load{xs,i}, {i}=>load{xv,i}, dst, len}
}

as2bit{VT, unr, op}(dst:*u64, wr:*u8, x:u64, len:Size) : void = { # show{VT,unr,fmt{op}}
wv:= cast_vp{VT, wr}; ws:= cast_p{eltype{VT}, wr}
wv:= *VT~~wr; ws:= *eltype{VT}~~wr
xv:= broadcast{VT, pathAS{dst, len, eltype{VT}, op, x}}
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>load{wv,i}, {i}=>x, {i}=>xv, dst, len}
}

bitAA{bitop}(dst:*u64, wr:*u8, xr:*u8, len:Size) : void = {
ws:= cast_p{u64, wr}
xs:= cast_p{u64, xr}
ws:= *u64~~wr
xs:= *u64~~xr
@for (dst,ws,xs over _ from 0 to cdiv{len,64}) dst = bitop{ws,xs}
}

@@ -115,8 +115,8 @@ bitAS{op}(dst:*u64, wr:*u8, x:u64, len:Size) : void = { # show{'bitAS'}
fillbits{dst, len, r0}
return{}
}
if (r0) call{not, dst, cast_p{u64,wr}, len}
else call{cpy, dst, cast_p{u64,wr}, len}
if (r0) call{not, dst, *u64~~wr, len}
else call{cpy, dst, *u64~~wr, len}
}
@@ -41,10 +41,10 @@ def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i16==eltype{T}} =
tup{rl, anyne{rh, rl>>15, M}}
}
def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
def wp = unpackQ{w, cast_v{T,broadcast{T,0}>w}}
def xp = unpackQ{x, cast_v{T,broadcast{T,0}>x}}
def wp = unpackQ{w, T ~~ (broadcast{T,0}>w)}
def xp = unpackQ{x, T ~~ (broadcast{T,0}>x)}
def rp = each{__mul, wp, xp}
def bad = each{{v}=>cast_v{[16]i16,(v<<8)>>8 != v}, rp}
def bad = each{{v} => [16]i16 ~~ ((v<<8)>>8 != v), rp}
if (M{0}) { # masked check
tup{packQ{rp}, any{M{packQ{bad}}}}
} else { # unmasked check; can do check in a simpler way
@@ -52,10 +52,10 @@ def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
}
}
def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
max:= cast_v{[8]f32, broadcast{[8]u32, 0x4efffffe}}
max:= [8]f32 ~~ broadcast{[8]u32, 0x4efffffe}
def cf32{x} = emit{[8]f32, '_mm256_cvtepi32_ps', x}
f32mul:= cf32{w} * cf32{x}
tup{w*x, any{M{cast_v{[8]u32, emit{[8]f32, '_mm256_cmp_ps', abs{f32mul}, max, 29}}}}}
tup{w*x, any{M{[8]u32 ~~ emit{[8]f32, '_mm256_cmp_ps', abs{f32mul}, max, 29}}}}
# TODO fallback to the below if the above fails
# TODO don't do this, but instead shuffle one half, do math, unshuffle that half
# def wp = unpackQ{w, broadcast{T, 0}}
@@ -63,7 +63,7 @@ def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} =
# def rp = each{__mul32, wp, xp}
# def T2 = ty_dbl{T}
# def bad = each{{v}=>{
# ((cast_v{T2,v} + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
# (((T2~~v) + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
# }, rp}
# tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}} TODO use M
}
@@ -88,15 +88,15 @@ def arithAny{VT, F, W, X, r, len} = {
}

def arithAA{VT, F, w, x, r, len} = {
arithAny{VT, F, {i}=>load{cast_p{VT,w}, i}, {i}=>load{cast_p{VT,x}, i}, r, len}
arithAny{VT, F, {i}=>load{*VT~~w, i}, {i}=>load{*VT~~x, i}, r, len}
}
def arithAS{VT, F, w, x, r, len} = {
xv:= broadcast{VT, x}
arithAny{VT, F, {i}=>load{cast_p{VT,w}, i}, {i}=>xv, r, len}
arithAny{VT, F, {i}=>load{*VT~~w, i}, {i}=>xv, r, len}
}
def arithSA{VT, F, w, x, r, len} = {
wv:= broadcast{VT, w}
arithAny{VT, F, {i}=>wv, {i}=>load{cast_p{VT,x}, i}, r, len}
arithAny{VT, F, {i}=>wv, {i}=>load{*VT~~x, i}, r, len}
}

@@ -109,9 +109,9 @@ def cast_fB{T, x:(u64) & issigned{T} & T<i64} = {
r
}

arithAA{F,VT}(w: *u8, x: *u8, r: *u8, len: Size) : Size = { def c{x}=cast_p{eltype{VT}, x}; arithAA{VT, F, c{w}, c{x}, c{r}, len} }
arithAS{F,VT}(w: *u8, x: u64, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithAS{VT, F, cast_p {T, w}, cast_fB{T, x}, cast_p{T, r}, len} }
arithSA{F,VT}(w: u64, x: *u8, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithSA{VT, F, cast_fB{T, w}, cast_p {T, x}, cast_p{T, r}, len} }
arithAA{F,VT}(w: *u8, x: *u8, r: *u8, len: Size) : Size = { def c{x} = *eltype{VT} ~~ x; arithAA{VT, F, c{w}, c{x}, c{r}, len} }
arithAS{F,VT}(w: *u8, x: u64, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithAS{VT, F, *T ~~ w, cast_fB{T, x}, *T~~r, len} }
arithSA{F,VT}(w: u64, x: *u8, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithSA{VT, F, cast_fB{T, w}, *T ~~ x, *T~~r, len} }

'avx2_addAA_i8' = arithAA{__add,[32]i8 }; 'avx2_addAS_i8' = arithAS{__add,[32]i8 }; 'avx2_addSA_i8' = arithSA{__add,[32]i8 }
'avx2_addAA_i16' = arithAA{__add,[16]i16}; 'avx2_addAS_i16' = arithAS{__add,[16]i16}; 'avx2_addSA_i16' = arithSA{__add,[16]i16}
@@ -18,8 +18,8 @@ equal{W, X}(w:*u8, x:*u8, l:u64, d:u64) : u1 = {
if (W==u1) {
if (X==u1) { # bitarr ≡ bitarr
maskedLoop{256, l, {i, M} => {
cw:= load{cast_p{[32]u8, w}, i}
cx:= load{cast_p{[32]u8, x}, i}
cw:= load{*[32]u8 ~~ w, i}
cx:= load{*[32]u8 ~~ x, i}
if (anyneBit{cw,cx,M}) return{0}
}}
} else if (X==f64) { # bitarr ≡ f64arr
@@ -28,21 +28,21 @@ equal{W, X}(w:*u8, x:*u8, l:u64, d:u64) : u1 = {
f0:= broadcast{T, 0.0}
f1:= broadcast{T, 1.0}
maskedLoop{bulk, l, {i, M} => {
cw:= load{cast_p{u8,w}, i>>1} >> cast_i{u8, 4*(i&1)}
cx:= load{cast_p{T, x}, i}
cw:= load{*u8 ~~ w, i>>1} >> cast_i{u8, 4*(i&1)}
cx:= load{*T ~~ x, i}
wu:= blend{f0, f1, broadcast{[4]u64, cw} << make{[4]u64,63,62,61,60}}
if (anyne{wu, cx, M}) return{0}
}}
} else { # bitarr ≡ i8/i16/i32arr
def T = [256/width{X}]X
def sh{c} = c << (width{X}-1)
def sh{c & X==u8} = cast_v{T, to_el{u16,c}<<7}
def sh{c & X==u8} = T ~~ (to_el{u16,c}<<7)

# TODO compare with doing the comparison in vector registers
badBits:= broadcast{T, ~cast{X,1}}
maskedLoop{bulk, l, {i, M} => {
cw:= load{cast_p{ty_u{bulk}, w}, i}
cx:= load{cast_p{T,x}, i}
cw:= load{*ty_u{bulk} ~~ w, i}
cx:= load{*T ~~ x, i}
if (~andIsZero{M{cx}, badBits}) return{0}
if (anyne{promote{u64,getmask{sh{cx}}}, promote{u64,cw}, M}) return{0}
}}
@@ -54,8 +54,8 @@ equal{W, X}(w:*u8, x:*u8, l:u64, d:u64) : u1 = {
def fac = width{X}/width{W}

maskedLoop{bulk, l, {i, M} => {
cw:= load{cast_p{ww{tern{fac==1, 256, 128}, u8}, w + i*32/fac}, 0}
cx:= load{cast_p{ww{256, X}, x}, i}
cw:= load{*ww{tern{fac==1, 256, 128}, u8} ~~ (w + i*32/fac)}
cx:= load{*ww{256, X} ~~ x, i}
cwc:= cvt{W, ww{256, X}, cw}
if (anyne{cwc,cx,M}) return{0}
}}
@@ -16,17 +16,17 @@ mask256_64:*i64 = maskInit{256,i64}; mask128_64:*i64 = maskInit{128,i64}
mask256_1 :*u8 = maskInit1{256}; mask128_1 :*u8 = maskInit1{128}

def maskOf{T,n & w256{T, 8}} = load{cast_p{[32]u8, mask256_8 + (n^31)}, 0}
def maskOf{T,n & w256{T,16}} = load{cast_p{[16]u16, mask256_16 + (n^15)}, 0}
def maskOf{T,n & w256{T,32}} = load{cast_p{[ 8]u32, mask256_32 + (n^7)}, 0}
def maskOf{T,n & w256{T,64}} = load{cast_p{[ 4]u64, mask256_64 + (n^3)}, 0}
def maskOfBit{T,n & w256{T}} = load{cast_p{[32]u8, mask256_1 + (n>>3)^31 + 64*(n&7)}, 0}
def maskOf{T,n & w256{T, 8}} = load{*[32]u8 ~~ (mask256_8 + (n^31))}
def maskOf{T,n & w256{T,16}} = load{*[16]u16 ~~ (mask256_16 + (n^15))}
def maskOf{T,n & w256{T,32}} = load{*[ 8]u32 ~~ (mask256_32 + (n^7))}
def maskOf{T,n & w256{T,64}} = load{*[ 4]u64 ~~ (mask256_64 + (n^3))}
def maskOfBit{T,n & w256{T}} = load{*[32]u8 ~~ (mask256_1 + (n>>3)^31 + 64*(n&7))}

def maskOf{T,n & w128{T, 8}} = load{cast_p{[16]u8, mask128_8 + (n^15)}, 0}
def maskOf{T,n & w128{T,16}} = load{cast_p{[ 8]u16, mask128_16 + (n^7)}, 0}
def maskOf{T,n & w128{T,32}} = load{cast_p{[ 4]u32, mask128_32 + (n^3)}, 0}
def maskOf{T,n & w128{T,64}} = load{cast_p{[ 2]u64, mask128_64 + (n^1)}, 0}
def maskOfBit{T,n & w128{T}} = load{cast_p{[16]u8, mask128_1 + (n>>3)^15 + 32*(n&7)}, 0}
def maskOf{T,n & w128{T, 8}} = load{*[16]u8 ~~ (mask128_8 + (n^15))}
def maskOf{T,n & w128{T,16}} = load{*[ 8]u16 ~~ (mask128_16 + (n^7))}
def maskOf{T,n & w128{T,32}} = load{*[ 4]u32 ~~ (mask128_32 + (n^3))}
def maskOf{T,n & w128{T,64}} = load{*[ 2]u64 ~~ (mask128_64 + (n^1))}
def maskOfBit{T,n & w128{T}} = load{*[16]u8 ~~ (mask128_1 + (n>>3)^15 + 32*(n&7))}

def anyne{x:T, y:T, M & M{0}==0 & isvec{T}} = ~all{x==y}
def anyne{x:T, y:T, M & M{0}==1 & isvec{T}} = any{M{x!=y}}
@@ -37,9 +37,9 @@ def anyneBit{x:T, y:T, M} = ~M{x^y, 'all bits zeroes'}
def maskNone{x} = x
def maskNone{x, mode=='all bits zeroes'} = andIsZero{x, x}
def maskAfter{n} = {
def mask{x:X & isvec{X}} = x&cast_v{X,maskOf{X,n}}
def mask{x:X & isvec{X}} = x & (X~~maskOf{X,n})
def mask{x:X & anyInt{x}} = x & ((1<<n) - 1)
def mask{x:X, mode=='all bits zeroes'} = andIsZero{x, cast_v{X,maskOfBit{X,n}}}
def mask{x:X, mode=='all bits zeroes'} = andIsZero{x, X~~maskOfBit{X,n}}
def mask{X, mode=='to sign bits'} = maskOf{X,n}
def mask{mode=='count'} = n
def mask{x==0} = 1
@@ -57,7 +57,7 @@ def storeBatch{ptr:P, n, x:T, M} = {
def E0 = eltype{P}
xu:= ucvt{E0, x}
def TF = to_el{E0, T}
if (M{0}) maskstoreF{cast_p{TF, rpos}, M{TF, 'to sign bits'}, 0, xu}
if (M{0}) maskstoreF{*TF~~rpos, M{TF, 'to sign bits'}, 0, xu}
else storeLow{rpos, vcount{T}*width{E0}, xu}
}

@@ -66,8 +66,8 @@ def loadBatch{ptr:P, n, T} = {
def rpos = ptr + n*vcount{T}
def E0 = eltype{P}

if (width{eltype{T}} == width{E0}) load{cast_p{T, rpos}, 0}
else cvt{E0, T, load{cast_p{[16]u8, rpos}, 0}}
if (width{eltype{T}} == width{E0}) load{*T ~~ rpos}
else cvt{E0, T, load{*[16]u8 ~~ rpos}}
}

def maskedLoop{bulk, l, step} = {
@@ -8,9 +8,7 @@ avx2_bcs32(x:*u64, r:*i32, l:u64) : void = {
rv:= *[8]u32~~r
xv:= *u32~~x
c:= broadcast{[8]u32, 0}

def tail{k,i} = i - i>>k<<k # Last k bits of i
def bit {k,i} = tail{1,i>>k}<<k

def ii32 = iota{32}; def bit{k}=bit{k,ii32}; def tail{k}=tail{k,ii32}
def sums{n} = (if (n==0) tup{0}; else { def s=sums{n-1}; merge{s,s+1} })
def sel8{v, t} = sel{[16]u8, v, make{[32]i8, t}}
@@ -14,12 +14,12 @@ def minBulk{w, A, B & width{A}>=width{B}} = w/width{A}
# b:B - pointer to data to index; if width{B}<width{eltype{T}}, padding bytes are garbage read after wanted position
# idx - actual (unscaled) index list
def gather{def:T, b:B, idx:[8]i32, M & w256{T,32}} = {
if (M{0}) cast_v{T, emit{[8]i32, '_mm256_mask_i32gather_epi32', def, cast_p{i32,b}, idx, M{T,'to sign bits'}, width{eltype{B}}/8}}
else cast_v{T, emit{[8]i32, '_mm256_i32gather_epi32', cast_p{i32,b}, idx, width{eltype{B}}/8}}
if (M{0}) T ~~ emit{[8]i32, '_mm256_mask_i32gather_epi32', def, *i32~~b, idx, M{T,'to sign bits'}, width{eltype{B}}/8}
else T ~~ emit{[8]i32, '_mm256_i32gather_epi32', *i32~~b, idx, width{eltype{B}}/8}
}
def gather{def:T, b:B, idx:[4]i32, M & w256{T,64}} = {
if (M{0}) cast_v{T, emit{[4]i64, '_mm256_mask_i32gather_epi64', def, cast_p{i64,b}, idx, M{T,'to sign bits'}, width{eltype{B}}/8}}
else cast_v{T, emit{[4]i64, '_mm256_i32gather_epi64', cast_p{i64,b}, idx, width{eltype{B}}/8}}
if (M{0}) T ~~ emit{[4]i64, '_mm256_mask_i32gather_epi64', def, *i64~~b, idx, M{T,'to sign bits'}, width{eltype{B}}/8}
else T ~~ emit{[4]i64, '_mm256_i32gather_epi64', *i64~~b, idx, width{eltype{B}}/8}
}
@@ -31,9 +31,9 @@ select{rw, TI, TD}(w0:*u8, x0:*u8, r0:*u8, wl:u64, xl:u64) : u1 = {
def TDF = [bulk]TDE
def xlf = broadcast{TIF, cast_i{eltype{TIF}, xl}}

w:=cast_p{TI,w0}
x:=cast_p{TD,x0}
r:=cast_p{TD,r0}
w:= *TI ~~ w0
x:= *TD ~~ x0
r:= *TD ~~ r0

maskedLoop{bulk, wl, {i, M} => {
cw0:= loadBatch{w, i, TIF}
@@ -32,7 +32,7 @@ def comp16{w:*u64, X, r:*i16, l:u64} = {

slash2{F, T}(w:*u64, x:*T, r:*T, l:u64) : void = {
xv:= reinterpret{*u64, x}
F{w, {} => {c:=load{xv,0}; xv+=1; c}, r, l}
F{w, {} => {c:= *xv; xv+=1; c}, r, l}
}

slash1{F, T, iota, add}(w:*u64, r:*T, l:u64) : void = {
@@ -16,27 +16,23 @@ def w128s = wgen128{{T} => isint{T} & issigned{T}}
def w128u = wgen128{{T} => isint{T} & isunsigned{T}}
def w128f = wgen128{{T} => isfloat{T}}

def cast_v{T, x:S & match{show{T},show{type{x}}}} = x
def cast_v{R, x:S & w128i{R } & w128{S}} = emit{R, '(__m128i)', x}
def cast_v{R, x:S & w128f{R,32} & w128{S}} = emit{R, '(__m128)', x}
def cast_v{R, x:S & w128f{R,64} & w128{S}} = emit{R, '(__m128d)', x}
def v2i{x:T & w128{T}} = cast_v{[16]u8, x} # for compact casting for the annoying intrinsic type system
def v2f{x:T & w128{T}} = cast_v{[4]f32, x}
def v2d{x:T & w128{T}} = cast_v{[2]f64, x}
def v2i{x:T & w128{T}} = [16]u8 ~~ x # for compact casting for the annoying intrinsic type system
def v2f{x:T & w128{T}} = [4]f32 ~~ x
def v2d{x:T & w128{T}} = [2]f64 ~~ x

# load & store
def load {a:T, n & w128i{eltype{T} }} = emit{eltype{T}, '_mm_loadu_si128', a+n}
def loada{a:T, n & w128i{eltype{T} }} = emit{eltype{T}, '_mm_load_si128', a+n}
def load {a:T, n & w128f{eltype{T},64}} = emit{eltype{T}, '_mm_loadu_pd', cast_p{f64, a+n}}
def loada{a:T, n & w128f{eltype{T},64}} = emit{eltype{T}, '_mm_load_pd', cast_p{f64, a+n}}
def load {a:T, n & w128f{eltype{T},32}} = emit{eltype{T}, '_mm_loadu_ps', cast_p{f32, a+n}}
def loada{a:T, n & w128f{eltype{T},32}} = emit{eltype{T}, '_mm_load_ps', cast_p{f32, a+n}}
def store {a:T, n, v & w128i{eltype{T} }} = emit{void, '_mm_storeu_si128', a+n, v}
def storea{a:T, n, v & w128i{eltype{T} }} = emit{void, '_mm_store_si128', a+n, v}
def store {a:T, n, v & w128f{eltype{T},64}} = emit{void, '_mm_storeu_pd', cast_p{f64, a+n}, v}
def storea{a:T, n, v & w128f{eltype{T},64}} = emit{void, '_mm_store_pd', cast_p{f64, a+n}, v}
def store {a:T, n, v & w128f{eltype{T},32}} = emit{void, '_mm_storeu_ps', cast_p{f32, a+n}, v}
def storea{a:T, n, v & w128f{eltype{T},32}} = emit{void, '_mm_store_ps', cast_p{f32, a+n}, v}
def load {a:T, n & w128i{eltype{T} }} = emit{eltype{T}, '_mm_loadu_si128', a+n}
def loada{a:T, n & w128i{eltype{T} }} = emit{eltype{T}, '_mm_load_si128', a+n}
def load {a:T, n & w128f{eltype{T},64}} = emit{eltype{T}, '_mm_loadu_pd', *f64 ~~ (a+n)}
def loada{a:T, n & w128f{eltype{T},64}} = emit{eltype{T}, '_mm_load_pd', *f64 ~~ (a+n)}
def load {a:T, n & w128f{eltype{T},32}} = emit{eltype{T}, '_mm_loadu_ps', *f32 ~~ (a+n)}
def loada{a:T, n & w128f{eltype{T},32}} = emit{eltype{T}, '_mm_load_ps', *f32 ~~ (a+n)}
def store {a:T, n, v & w128i{eltype{T} }} = emit{void, '_mm_storeu_si128', a+n, v}
def storea{a:T, n, v & w128i{eltype{T} }} = emit{void, '_mm_store_si128', a+n, v}
def store {a:T, n, v & w128f{eltype{T},64}} = emit{void, '_mm_storeu_pd', *f64 ~~ (a+n), v}
def storea{a:T, n, v & w128f{eltype{T},64}} = emit{void, '_mm_store_pd', *f64 ~~ (a+n), v}
def store {a:T, n, v & w128f{eltype{T},32}} = emit{void, '_mm_storeu_ps', *f32 ~~ (a+n), v}
def storea{a:T, n, v & w128f{eltype{T},32}} = emit{void, '_mm_store_ps', *f32 ~~ (a+n), v}

# broadcast
def broadcast{T, v & w128i{T, 8}} = emit{T, '_mm_set1_epi8', ext{eltype{T},v}}
@@ -82,9 +78,9 @@ def getmask{x:T & w128{T, 64}} = emit{u8, '_mm_movemask_pd', v2d{x}}
def any{x:T & w128i{T}} = getmask{x} != 0 # assumes elements of x all have equal bits (avx2 utilizes this for 16 bits)
def all{x:T & w128i{T}} = getmask{x} == (1<<vcount{T})-1 # same assumption
def anyneg{x:T & w128s{T}} = getmask{x}!=0
def any{x:T & w128i{T, 16}} = getmask{cast_v{[32]u8,x}}!=0
def all{x:T & w128i{T, 16}} = getmask{cast_v{[32]u8,x}} == (1<<32)-1
def anyneg{x:T & w128s{T, 16}} = getmask{cast_v{[32]u8, cast_v{[16]i16,x} < broadcast{[16]i16, 0}}}!=0
def any{x:T & w128i{T, 16}} = getmask{[32]u8 ~~ x}!=0
def all{x:T & w128i{T, 16}} = getmask{[32]u8 ~~ x} == (1<<32)-1
def anyneg{x:T & w128s{T, 16}} = getmask{[32]u8 ~~ (([16]i16~~x) < broadcast{[16]i16, 0})}!=0

# the lone SSE2 extract
def extract{x:T, i & w128i{T,16} & knum{i}} = emit{eltype{T}, '_mm_extract_epi16', x, i}
@@ -109,13 +105,13 @@ def cvt{F==i16, T==[4]i32, a:A & w128i{A}} = emit{T, '_mm_cvtepi16_epi32', a}
def andIsZero{x:T, y:T & w128i{T}} = emit{u1, '_mm_testz_si128', x, y}

# blend by sign bit; no 16-bit case
def blend{f:T, t:T, m:M & w128{T} & w128i{M,32}} = cast_v{T, emit{[4]f32, '_mm_blendv_ps', v2f{f}, v2f{t}, v2f{m}}}
def blend{f:T, t:T, m:M & w128{T} & w128i{M,64}} = cast_v{T, emit{[2]f64, '_mm_blendv_pd', v2d{f}, v2d{t}, v2d{m}}}
def blend{f:T, t:T, m:M & w128{T} & w128i{M, 8}} = cast_v{T, emit{[16]i8, '_mm_blendv_epi8', v2i{f}, v2i{t}, v2i{m}}}
def blend{f:T, t:T, m:M & w128{T} & w128i{M,32}} = T ~~ emit{[4]f32, '_mm_blendv_ps', v2f{f}, v2f{t}, v2f{m}}
def blend{f:T, t:T, m:M & w128{T} & w128i{M,64}} = T ~~ emit{[2]f64, '_mm_blendv_pd', v2d{f}, v2d{t}, v2d{m}}
def blend{f:T, t:T, m:M & w128{T} & w128i{M, 8}} = T ~~ emit{[16]i8, '_mm_blendv_epi8', v2i{f}, v2i{t}, v2i{m}}

# assumes all bits are the same in each mask item
def blendF{f:T, t:T, m:M & w128{T} & w128{M} & width{eltype{M}}!=16} = blend{f, t, m}
def blendF{f:T, t:T, m:M & w128{T} & w128{M,16}} = blend{f, t, cast_v{[16]i8,m}}
def blendF{f:T, t:T, m:M & w128{T} & w128{M,16}} = blend{f, t, [16]i8~~m}

def min{a:T,b:T & T==[16]i8 } = emit{T, '_mm_min_epi8', a, b}; def max{a:T,b:T & T==[16]i8 } = emit{T, '_mm_max_epi8', a, b}
def min{a:T,b:T & T==[ 4]i32} = emit{T, '_mm_min_epi32', a, b}; def max{a:T,b:T & T==[ 4]i32} = emit{T, '_mm_max_epi32', a, b}