diff --git a/src/builtins/arithd.c b/src/builtins/arithd.c
index 4b291f35..f814daf5 100644
--- a/src/builtins/arithd.c
+++ b/src/builtins/arithd.c
@@ -6,6 +6,11 @@
 #define BCALL(N, X) N(b(X))
 #define interp_f64(X) b(X).f
 
+static i8 mask8[] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static i16 mask16[] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static i32 mask32[] = {-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0};
+static i64 mask64[] = {-1,-1,-1,0,0,0};
+
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunused-variable"
 #include "../singeli/gen/dyarith.c"
diff --git a/src/main.c b/src/main.c
index 044618cc..1e085e52 100644
--- a/src/main.c
+++ b/src/main.c
@@ -220,7 +220,9 @@ int main(int argc, char* argv[]) {
         B val = e->vars[i];
         e->vars[i] = bi_noVar;
         dec(val);
-        if (!gc_depth) gc_forceGC();
+        #if ENABLE_GC
+          if (!gc_depth) gc_forceGC();
+        #endif
         goto cont;
       }
     }
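
The maskN tables above are the usual table-driven prefix-mask trick: for an N-lane vector, N-1 lanes of -1 followed by N-1 zeroes, so a single unaligned 256-bit load at an offset determined by n yields a vector whose first n lanes are all-ones. A standalone C sketch of the idea (illustrative only; the table and function names here are mine, not part of the patch):

#include <immintrin.h>
#include <stdint.h>

// 31 bytes of -1 followed by 31 zero bytes, mirroring mask8 above.
static const int8_t prefix_tab[62] = {
  -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
  -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
  // remaining 31 entries are zero-initialized
};

// Mask of the first n byte lanes, assuming 0 < n < 32: lanes 0..n-1 fall in
// the -1 region, lanes n..31 in the zero region. One unaligned load, no shifts.
static __m256i prefix_mask_i8(int n) {
  return _mm256_loadu_si256((const __m256i*)(prefix_tab + (31 - n)));
}

The 16-, 32- and 64-bit tables work the same way with 16, 8 and 4 lanes respectively.
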
diff --git a/src/singeli/src/avx.singeli b/src/singeli/src/avx.singeli
index 164c02c9..e745d852 100644
--- a/src/singeli/src/avx.singeli
+++ b/src/singeli/src/avx.singeli
@@ -20,7 +20,6 @@ def cast_vp{T, x & w256{T}} = emit{*T, '(void*)', x}
 def cast_v{R, x:S & w256i{R } & w256{S}} = emit{R, '(__m256i)', x}
 def cast_v{R, x:S & w256f{R,32} & w256{S}} = emit{R, '(__m256)', x}
 def cast_v{R, x:S & w256f{R,64} & w256{S}} = emit{R, '(__m256d)', x}
-
 def v2i{x:T & w256{T}} = cast_v{[32]u8, x} # for compact casting for the annoying intrinsic type system
 def v2f{x:T & w256{T}} = cast_v{[8]f32, x}
 def v2d{x:T & w256{T}} = cast_v{[4]f64, x}
@@ -30,14 +29,14 @@ def ty_vs{T & w256u{T}} = [vcount{T}](ty_is{eltype{T}})
 def forv{T & w256{T}} = forc{{v}=>cast_vp{T,v}}
 
 # load & store
-def load {a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_loadu_si256', emit{T, 'op +', a, n}}
-def loada{a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_load_si256', emit{T, 'op +', a, n}}
-def load {a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_loadu_pd', cast_p{f64, emit{T, 'op +', a, n}}}
-def loada{a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_load_pd', cast_p{f64, emit{T, 'op +', a, n}}}
-def store {a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_storeu_si256', emit{T, 'op +', a, n}, v}
-def storea{a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_store_si256', emit{T, 'op +', a, n}, v}
-def store {a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_storeu_pd', cast_p{f64, emit{T, 'op +', a, n}}, v}
-def storea{a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_store_pd', cast_p{f64, emit{T, 'op +', a, n}}, v}
+def load {a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_loadu_si256', a+n}
+def loada{a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_load_si256', a+n}
+def load {a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_loadu_pd', cast_p{f64, a+n}}
+def loada{a:T, n & w256f{eltype{T},64}} = emit{eltype{T}, '_mm256_load_pd', cast_p{f64, a+n}}
+def store {a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_storeu_si256', a+n, v}
+def storea{a:T, n, v & w256i{eltype{T} }} = emit{void, '_mm256_store_si256', a+n, v}
+def store {a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_storeu_pd', cast_p{f64, a+n}, v}
+def storea{a:T, n, v & w256f{eltype{T},64}} = emit{void, '_mm256_store_pd', cast_p{f64, a+n}, v}
 
 # broadcast
 def broadcast{T, v & w256i{T, 8}} = emit{T, '_mm256_set1_epi8', ext{eltype{T},v}}
@@ -54,9 +53,9 @@
 def make{T==[8]i32,a,b,c,d,e,f,g,h} = emit{T,'_mm256_set_epi32',ext{i32,h},ext{i32,g},ext{i32,f},ext{i32,e},ext{i32,d},ext{i32,c},ext{i32,b},ext{i32,a}}
 def make{T==[16]i16,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p} = emit{T,'_mm256_set_epi16', ext{i16,p},ext{i16,o},ext{i16,n},ext{i16,m},ext{i16,l},ext{i16,k},ext{i16,j},ext{i16,i},ext{i16,h},ext{i16,g},ext{i16,f},ext{i16,e},ext{i16,d},ext{i16,c},ext{i16,b},ext{i16,a}}
 def make{T==[32]i8,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P} = emit{T,'_mm256_set_epi8', ext{i8,P},ext{i8,O},ext{i8,N},ext{i8,M},ext{i8,L},ext{i8,K},ext{i8,J},ext{i8,I},ext{i8,H},ext{i8,G},ext{i8,F},ext{i8,E},ext{i8,D},ext{i8,C},ext{i8,B},ext{i8,A}, ext{i8,p},ext{i8,o},ext{i8,n},ext{i8,m},ext{i8,l},ext{i8,k},ext{i8,j},ext{i8,i},ext{i8,h},ext{i8,g},ext{i8,f},ext{i8,e},ext{i8,d},ext{i8,c},ext{i8,b},ext{i8,a}}
 
-def __xor{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_xor_ps', cast_v{[8]f32, a}, cast_v{[8]f32, b}}}
-def __and{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_and_ps', cast_v{[8]f32, a}, cast_v{[8]f32, b}}}
-def __or {a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_or_ps',  cast_v{[8]f32, a}, cast_v{[8]f32, b}}}
+def __xor{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_xor_ps', v2f{a}, v2f{b}}}
+def __and{a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_and_ps', v2f{a}, v2f{b}}}
+def __or {a:T, b:T & w256i{T}} = cast_v{T, emit{[8]f32, '_mm256_or_ps',  v2f{a}, v2f{b}}}
 
 def __not{a:T & w256u{T}} = a ^ broadcast{T, ~cast{eltype{T},0}}
@@ -77,7 +76,7 @@
 def __div{a:T,b:T & T==[8]f32} = emit{T, '_mm256_div_ps', a, b}
 def max{a:T,b:T & T==[8]f32} = emit{T, '_mm256_max_ps', a, b}
 def min{a:T,b:T & T==[8]f32} = emit{T, '_mm256_min_ps', a, b}
 def sqrt{a:T & T==[8]f32} = emit{T, '_mm256_sqrt_ps', a}
-def abs{a:[8]f32} = emit{[8]f32, '_mm256_and_ps', a, cast_v{[8]f32, broadcast{[8]u32, 0x7FFFFFFF}}}
+def abs{a:[8]f32} = emit{[8]f32, '_mm256_and_ps', a, v2f{broadcast{[8]u32, 0x7FFFFFFF}}}
 def floor{a:[8]f32} = emit{[8]f32, '_mm256_floor_ps', a}
 def ceil{a:[8]f32} = emit{[8]f32, '_mm256_ceil_ps', a}
@@ -89,7 +88,7 @@
 def __div{a:T,b:T & T==[4]f64} = emit{T, '_mm256_div_pd', a, b}
 def max{a:T,b:T & T==[4]f64} = emit{T, '_mm256_max_pd', a, b}
 def min{a:T,b:T & T==[4]f64} = emit{T, '_mm256_min_pd', a, b}
 def sqrt{a:T & T==[4]f64} = emit{T, '_mm256_sqrt_pd', a}
-def abs{a:[4]f64} = emit{[4]f64, '_mm256_and_pd', a, cast_v{[4]f64, broadcast{[4]u64, (cast{u64,1}<<63)-1}}}
+def abs{a:[4]f64} = emit{[4]f64, '_mm256_and_pd', a, v2d{broadcast{[4]u64, (cast{u64,1}<<63)-1}}}
 def floor{a:[4]f64} = emit{[4]f64, '_mm256_floor_pd', a}
 def ceil{a:[4]f64} = emit{[4]f64, '_mm256_ceil_pd', a}
@@ -104,11 +103,11 @@ def insert{x:T, i, v & w256i{T,32} & knum{i}} = emit{T, '_mm256_insert_epi32', x
 def insert{x:T, i, v & w256i{T,64} & knum{i}} = emit{T, '_mm256_insert_epi64', x, v, i}
 
 # mixed-width operations
-def half{x:[8]i32, i & knum{i}} = emit{[4]i32, '_mm256_extracti128_si256', x, i}
-def pair{a:[4]i32,b:[4]i32} = emit{[8]i32, '_mm256_setr_m128i', a, b}
+def half{x:T, i & w256{T} & knum{i}} = cast_v{[vcount{T}/2](eltype{T}), emit{[8]i16, '_mm256_extracti128_si256', v2i{x}, i}}
+def pair{a:T,b:T & width{T}==128} = cast_v{[vcount{T}*2](eltype{T}), emit{[8]i32, '_mm256_setr_m128i', v2i{a}, v2i{b}}}
 
 # mask stuff
-def getmask{x:T & w256{T, 32}} = emit{u8, '_mm256_movemask_ps', cast_v{[8]f32, x}}
-def getmask{x:T & w256{T, 64}} = emit{u8, '_mm256_movemask_pd', cast_v{[4]f64, x}}
+def getmask{x:T & w256{T, 32}} = emit{u8, '_mm256_movemask_ps', v2f{x}}
+def getmask{x:T & w256{T, 64}} = emit{u8, '_mm256_movemask_pd', v2d{x}}
 def any{x:T & w256i{T}} = getmask{x}!=0 # assumes elements of x all have equal bits (avx2 utilizes this for 16 bits)
 def anyneg{x:T & w256s{T}} = getmask{x}!=0
\ No newline at end of file
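
getmask/any/anyneg lean on the movemask instructions, which pack each lane's sign bit into a scalar; that is why any{} is annotated as requiring every element to be all-0 or all-1 bits. In plain intrinsics the pattern looks roughly like this (a sketch with my own function name, not patch code):

#include <immintrin.h>

// anyneg for [8]i32: reinterpret as floats, gather the 8 sign bits with
// vmovmskps, and test the resulting byte. The cast emits no instruction.
static int anyneg_i32x8(__m256i x) {
  return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
}
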
diff --git a/src/singeli/src/avx2.singeli b/src/singeli/src/avx2.singeli
index 8f4730b1..2b4aa46f 100644
--- a/src/singeli/src/avx2.singeli
+++ b/src/singeli/src/avx2.singeli
@@ -1,3 +1,7 @@
+# maskstore
+def maskstore{a:T, m:M, n, v & w256{eltype{T}, 32} & w256i{M, 32}} = emit{void, '_mm256_maskstore_epi32', cast_p{i32, a+n}, m, v}
+def maskstore{a:T, m:M, n, v & w256{eltype{T}, 64} & w256i{M, 64}} = emit{void, '_mm256_maskstore_pd', cast_p{f64, a+n}, m, v}
+
 # min & max
 def min{a:T,b:T & T==[32]i8 } = emit{T, '_mm256_min_epi8', a, b};  def min{a:T,b:T & T==[32]u8 } = emit{T, '_mm256_min_epu8', a, b}
 def min{a:T,b:T & T==[16]i16} = emit{T, '_mm256_min_epi16', a, b}; def min{a:T,b:T & T==[16]u16} = emit{T, '_mm256_min_epu16', a, b}
@@ -105,6 +109,15 @@ def blend{I==[8]u16, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[16]i16,
 def blend{I==[8]u32, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[ 8]i32, '_mm256_blend_epi32', v2i{a}, v2i{b}, m}}
 def blend{I==[4]u64, a:T, b:T, m & w256{T} & knum{m}} = cast_v{T, emit{[ 4]f64, '_mm256_blend_pd', v2d{a}, v2d{b}, m}}
 
+# blends by sign bit; no 16-bit case
+def blend{a:T, b:T, m:M & w256{T} & w256i{M, 8}} = cast_v{T, emit{[32]i8, '_mm256_blendv_epi8', v2i{a}, v2i{b}, v2i{m}}}
+def blend{a:T, b:T, m:M & w256{T} & w256i{M,32}} = cast_v{T, emit{[8]f32, '_mm256_blendv_ps', v2f{a}, v2f{b}, v2f{m}}}
+def blend{a:T, b:T, m:M & w256{T} & w256i{M,64}} = cast_v{T, emit{[4]f64, '_mm256_blendv_pd', v2d{a}, v2d{b}, v2d{m}}}
+
+# assumes all bits are the same in each mask item
+def blendf = blend
+def blendf{a:T, b:T, m:M & w256{T} & w256i{M,16}} = blend{a, b, cast_v{[32]i8,m}}
+
 def shuf{I==[4]u32, x:T, n & w256{T} & knum{n}} = cast_v{T, emit{[8]i32, '_mm256_shuffle_epi32', v2i{x}, n}}
 def shuf{I==[4]u64, x:T, n & w256{T} & knum{n}} = cast_v{T, emit{[4]f64, '_mm256_permute4x64_pd', v2d{x}, n}}
diff --git a/src/singeli/src/base.singeli b/src/singeli/src/base.singeli
index 4e6fee79..514c6d49 100644
--- a/src/singeli/src/base.singeli
+++ b/src/singeli/src/base.singeli
@@ -14,6 +14,10 @@ def isunsigned{T} = isint{T} & ~issigned{T}
 
 def assert{x:u1} = emit{void, 'si_assert', x}
 def cast_p{T, x} = emit{*T, '(void*)', x}
+def anyInt{x} = knum{x}
+def anyInt{x & match{'register',kind{x}} | match{'constant',kind{x}}} = isint{type{x}}
+def __add{a:T,b & match{'pointer',typekind{T}} & anyInt{b}} = emit{T, 'op +', a, b}
+def __sub{a:T,b & match{'pointer',typekind{T}} & anyInt{b}} = emit{T, 'op -', a, b}
 
 def ty_iu{T & T==i8 } = u8;  def ty_is{T & T==i8 } = u8
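
The new maskstore wrappers cover only 32- and 64-bit lanes because AVX2's vpmaskmov has no 8/16-bit forms; for narrow lanes, maskstoreF in dyarith.singeli below falls back to load + blendv + full store. A hedged C sketch of both paths (function names are mine):

#include <immintrin.h>
#include <stdint.h>

// 32-bit lanes: native masked store; lanes whose mask sign bit is 0
// are left untouched in memory.
static void maskstore_i32(int32_t* p, __m256i m, __m256i v) {
  _mm256_maskstore_epi32((int*)p, m, v);
}

// 8-bit lanes: no byte-granularity maskstore exists, so read the old
// contents, blend in the new lanes by mask sign bit, and store the whole
// vector back. Only safe when all 32 bytes at p are writable, as they are
// for the tail of a full-size result array.
static void maskstoreF_i8(int8_t* p, __m256i m, __m256i v) {
  __m256i old = _mm256_loadu_si256((const __m256i*)p);
  _mm256_storeu_si256((__m256i*)p, _mm256_blendv_epi8(old, v, m));
}
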
diff --git a/src/singeli/src/dyarith.singeli b/src/singeli/src/dyarith.singeli
index 17126263..cef86a92 100644
--- a/src/singeli/src/dyarith.singeli
+++ b/src/singeli/src/dyarith.singeli
@@ -16,40 +16,54 @@ def ty_dbl{T & i32==T} = i64
 def ty_dbl{T & isvec{T}} = [vcount{T}/2](ty_dbl{eltype{T}})
 def dcast_i{x} = ext{ty_dbl{type{x}}, x}
 
-# + & -
-def arithChk1{F, w:T, x:T, r:T & match{F,__add}} = anyneg{(w^r) & (x^r)}
-def arithChk1{F, w:T, x:T, r:T & match{F,__sub}} = anyneg{(w^x) & (w^r)}
-def arithChk1{F, w:T, x:T, r:T & match{F,__add} & isvec{T} & width{eltype{T}}<=16} = any{__adds{w,x}!=r}
-def arithChk1{F, w:T, x:T, r:T & match{F,__sub} & isvec{T} & width{eltype{T}}<=16} = any{__subs{w,x}!=r}
+# get mask of first n items; n>0 & n<vcount{T}
+def maskstoreF{p, m, n, x:T & width{eltype{T}}>=32} = maskstore{p,m,n,x}
+def maskstoreF{p, m, n, x:T} = store{p, n, blendf{load{p,n}, x, m}}
+
+
+# + & -
+def arithChk1{F, M, w:T, x:T, r:T & match{F,__add}} = anyneg{M{(w^r) & (x^r)}}
+def arithChk1{F, M, w:T, x:T, r:T & match{F,__sub}} = anyneg{M{(w^x) & (w^r)}}
+def arithChk1{F, M, w:T, x:T, r:T & match{F,__add} & isvec{T} & width{eltype{T}}<=16} = any{M{__adds{w,x}!=r}}
+def arithChk1{F, M, w:T, x:T, r:T & match{F,__sub} & isvec{T} & width{eltype{T}}<=16} = any{M{__subs{w,x}!=r}}
 
-def arithChk2{F, w:T, x:T, i & issigned{rootty{T}}} = {
+def arithChk2{F, M, w:T, x:T, i & issigned{rootty{T}}} = {
   r:= F{w,x}
-  tup{r, arithChk1{F, w, x, r}}
+  tup{r, arithChk1{F, M, w, x, r}}
 }
 
 # ×/∧
-def arithChk2{F, w:T, x:T, i & match{F,__mul} & match{typekind{T},'primitive'}} = {
+def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & match{typekind{T},'primitive'}} = {
   r:= F{dcast_i{w}, dcast_i{x}}
   tup{r, r!=ext{type{r}, trunc{T, r}}}
 }
-def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i16==eltype{T}} = {
+def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i16==eltype{T}} = {
   rl:= __mul  {w,x}
   rh:= __mulhi{w,x}
-  tup{rl, any{rh != rl>>15}}
+  tup{rl, any{M{rh != rl>>15}}}
 }
-def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
+def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
   def wp = unpackQ{w, cast_v{T,broadcast{T,0}>w}}
   def xp = unpackQ{x, cast_v{T,broadcast{T,0}>x}}
   def rp = each{__mul, wp, xp}
-  def bad = each{{v}=>(v<<8)>>8 != v, rp}
-  tup{packQ{rp}, any{tupsel{0,bad}|tupsel{1,bad}}}
+  def bad = each{{v}=>cast_v{[16]i16,(v<<8)>>8 != v}, rp}
+  if (M{0}) { # masked check
+    tup{packQ{rp}, any{M{packQ{bad}}}}
+  } else { # unmasked check; can do check in a simpler way
+    tup{packQ{rp}, any{tupsel{0,bad}|tupsel{1,bad}}}
+  }
 }
-def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
+def arithChk2{F, M, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
   max:= cast_v{[8]f32, broadcast{[8]u32, 0x4efffffe}}
   def cf32{x} = emit{[8]f32, '_mm256_cvtepi32_ps', x}
   f32mul:= cf32{w} * cf32{x}
-  tup{w*x, any{cast_v{[8]u32, emit{[8]f32, '_mm256_cmp_ps', abs{f32mul}, max, 29}}}}
+  tup{w*x, any{M{cast_v{[8]u32, emit{[8]f32, '_mm256_cmp_ps', abs{f32mul}, max, 29}}}}}
   # TODO fallback to the below if the above fails
   # def wp = unpackQ{w, broadcast{T, 0}}
   # def xp = unpackQ{x, broadcast{T, 0}}
@@ -58,41 +72,44 @@ def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
   # def bad = each{{v}=>{
   #   ((cast_v{T2,v} + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
   # }, rp}
-  # tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}}
+  # tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}} TODO use M
 }
 
 # f64
-def arithChk3{F, w:T, x:T, i} = {
-  def r2 = arithChk2{F, w, x, i}
+def arithChk3{F, M, w:T, x:T, i} = {
+  def r2 = arithChk2{F, M, w, x, i}
   if (rare{tupsel{1,r2}}) return{i}
   tupsel{0,r2}
 }
-def arithChk3{F, w:T, x:T, i & f64==rootty{T}} = F{w,x}
+def arithChk3{F, M, w:T, x:T, i & f64==rootty{T}} = F{w,x}
+
+def arithAny{VT, F, W, X, r, len} = {
+  def bam = vcount{VT}
+  def vv = len/bam
+  @forv{VT} (r over i from 0 to vv) r = arithChk3{F, {x}=>x, W{i}, X{i}, i*bam}
+  left:= len&(bam-1)
+  if (left!=0) {
+    m:= genmask{VT, left}
+    def mask{x:X} = x&cast_v{X,m}
+    def mask{x==0} = 1
+    rv:= arithChk3{F, mask, W{vv}, X{vv}, vv*bam}
+    maskstoreF{cast_p{VT,r}, m, vv, rv}
+  }
+  len
+}
 
 def arithAA{VT, F, w, x, r, len} = {
-  def bam = vcount{VT}
-  def vv = len/bam
-  @forv{VT} (w,x,r over i from 0 to vv) r = arithChk3{F, w, x, i*bam}
-  @for (w,x,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
-  len
+  arithAny{VT, F, {i}=>load{cast_p{VT,w}, i}, {i}=>load{cast_p{VT,x}, i}, r, len}
 }
 def arithAS{VT, F, w, x, r, len} = {
-  def bam = vcount{VT}
-  def vv = len/bam
   xv:= broadcast{VT, x}
-  @forv{VT} (w,r over i from 0 to vv) r = arithChk3{F, w, xv, i*bam}
-  @for (w,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
-  len
+  arithAny{VT, F, {i}=>load{cast_p{VT,w}, i}, {i}=>xv, r, len}
 }
 def arithSA{VT, F, w, x, r, len} = {
-  def bam = vcount{VT}
-  def vv = len/bam
   wv:= broadcast{VT, w}
-  @forv{VT} (x,r over i from 0 to vv) r = arithChk3{F, wv, x, i*bam}
-  @for (x,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
-  len
+  arithAny{VT, F, {i}=>wv, {i}=>load{cast_p{VT,x}, i}, r, len}
}
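
The arithChk1 overflow tests are the classic branch-free sign-bit identities: signed w+x overflows exactly when the result's sign differs from the signs of both inputs, and w-x overflows when w and x differ in sign and the result takes x's sign; anyneg then just inspects sign bits across lanes, with M zeroing the lanes past the array end. A scalar sketch of the identities (illustrative, not project code):

#include <stdint.h>

// (w^r)&(x^r) has the sign bit set iff r's sign differs from both w's and
// x's, which for two's-complement addition happens exactly on overflow.
static int add_overflows_i32(int32_t w, int32_t x) {
  int32_t r = (int32_t)((uint32_t)w + (uint32_t)x); // wrapping add
  return ((w ^ r) & (x ^ r)) < 0;
}

// (w^x)&(w^r): subtraction overflows iff the operands differ in sign and
// the result's sign differs from the minuend's.
static int sub_overflows_i32(int32_t w, int32_t x) {
  int32_t r = (int32_t)((uint32_t)w - (uint32_t)x); // wrapping subtract
  return ((w ^ x) & (w ^ r)) < 0;
}

For 8/16-bit lanes the patch instead compares the wrapping result against the saturating __adds/__subs, which differ exactly when overflow occurred.
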
diff --git a/src/singeli/src/sse3.singeli b/src/singeli/src/sse3.singeli
new file mode 100644
index 00000000..6f2762bb
--- /dev/null
+++ b/src/singeli/src/sse3.singeli
@@ -0,0 +1,22 @@
+def wgen128{F} = {
+  def r{T} = 0
+  def r{T & width{T}==128} = F{eltype{T}}
+  def r{T,w} = 0
+  def r{T,w & width{T}==128} = F{eltype{T}} & (width{eltype{T}}==w)
+  r
+}
+def w128{T} = width{T}==128
+def w128{T, w} = 0
+def w128{T, w & width{T}==128 & width{eltype{T}}==w} = 1
+def w128i = wgen128{{T} => isint{T}}
+def w128s = wgen128{{T} => isint{T} & issigned{T}}
+def w128u = wgen128{{T} => isint{T} & isunsigned{T}}
+def w128f = wgen128{{T} => isfloat{T}}
+
+def cast_v{T, x:S & match{show{T},show{type{x}}}} = x
+def cast_v{R, x:S & w128i{R } & w128{S}} = emit{R, '(__m128i)', x}
+def cast_v{R, x:S & w128f{R,32} & w128{S}} = emit{R, '(__m128)', x}
+def cast_v{R, x:S & w128f{R,64} & w128{S}} = emit{R, '(__m128d)', x}
+def v2i{x:T & w128{T}} = cast_v{[16]u8, x} # for compact casting for the annoying intrinsic type system
+def v2f{x:T & w128{T}} = cast_v{[4]f32, x}
+def v2d{x:T & w128{T}} = cast_v{[2]f64, x}
\ No newline at end of file
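
The cast_v/v2i/v2f/v2d shims exist only because Intel's C intrinsics distinguish __m128i/__m128/__m128d (and their 256-bit counterparts) even though all three are the same 128 bits in a register. The generated code uses plain C casts, which work on GCC/Clang vector types; with the standard intrinsic types the equivalent reinterprets would be (sketch):

#include <immintrin.h>

// Zero-cost reinterprets between the three 128-bit intrinsic types;
// none of these compile to an instruction.
static __m128  as_f32(__m128i x) { return _mm_castsi128_ps(x); }
static __m128d as_f64(__m128i x) { return _mm_castsi128_pd(x); }
static __m128i as_int(__m128  x) { return _mm_castps_si128(x); }
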