singeli + & - & ×
Commit 1770251bad (parent 0c5b98772c)

makefile (2 changed lines)
@@ -142,7 +142,7 @@ preSingeliBin:
 @mv BQN obj/presingeli/BQN
 
-gen-singeli: ${addprefix src/singeli/gen/, cmp.c}
+gen-singeli: ${addprefix src/singeli/gen/, cmp.c dyarith.c}
 @echo $(postmsg)
 src/singeli/gen/%.c: src/singeli/src/%.singeli preSingeliBin
 @echo $< | cut -c 17- | sed 's/^/ /'
@@ -14,7 +14,10 @@ tests ← •FLines path∾"/test/cases/prim.bqn"
 (⊑'%'⊸∊)◶{𝕤
 •Out 𝕩
 "src/gen/interp" •FChars ⟨1,path,𝕩⟩ •Import "cc.bqn"
-(×⊑)◶@‿{𝕤⋄•Out "############ Failed to compile! ############" ⋄ •Out¨1↓𝕩}{env⇐<"PATH="∾envP}•SH"make"‿"f=-DPRECOMP"‿"t=precomp"‿"c"
+# make ← "make"‿"singeli=1"‿"t=precomp_si"‿"f=-DPRECOMP -march=native"‿"c"
+# make ← "make"‿"t=precomp_32"‿"f=-DPRECOMP -m32"‿"lf=-m32"‿"c"
+make ← "make"‿"f=-DPRECOMP"‿"t=precomp"‿"c"
+(×⊑)◶@‿{𝕤⋄•Out "############ Failed to compile! ############" ⋄ •Out¨1↓𝕩}{env⇐<"PATH="∾envP}•SH make
 code‿out‿err←•SH⟨"./BQN"⟩
 •Out out
 {𝕤⋄•Out"exit code "∾(•Repr code) ⋄ •Out err}⍟(×code) err
@@ -2,6 +2,16 @@
 #include "../utils/each.h"
 #include <math.h>
 
+#if SINGELI
+#define BCALL(N, X) N(b(X))
+#define interp_f64(X) b(X).f
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include "../singeli/gen/dyarith.c"
+#pragma GCC diagnostic pop
+#endif
+
 #define P2(N) { if(isArr(w)|isArr(x)) { \
 SLOWIF((!isArr(w) || TI(w,elType)!=el_B) && (!isArr(x) || TI(x,elType)!=el_B)) SLOW2("arithd " #N, w, x); \
 return arith_recd(N##_c2, w, x); \
@ -57,10 +67,10 @@
|
||||
#define PI8(N) i8* N##p = i8any_ptr (N);
|
||||
#define PI16(N) i16* N##p = i16any_ptr(N);
|
||||
#define PI32(N) i32* N##p = i32any_ptr(N);
|
||||
#define RI8(A) i8* rp; B r=m_i8arrc (&rp, A);
|
||||
#define RI16(A) i16* rp; B r=m_i16arrc(&rp, A);
|
||||
#define RI32(A) i32* rp; B r=m_i32arrc(&rp, A);
|
||||
#define RF(A) f64* rp; B r=m_f64arrc(&rp, A);
|
||||
#define Ri8(A) i8* rp; B r=m_i8arrc (&rp, A);
|
||||
#define Ri16(A) i16* rp; B r=m_i16arrc(&rp, A);
|
||||
#define Ri32(A) i32* rp; B r=m_i32arrc(&rp, A);
|
||||
#define Rf64(A) f64* rp; B r=m_f64arrc(&rp, A);
|
||||
|
||||
static NOINLINE u8 iMakeEq(B* w, B* x, u8 we, u8 xe) {
|
||||
B s = we<xe?*w:*x;
|
||||
@ -80,7 +90,7 @@
|
||||
u64* bp = bitarr_ptr(b);
|
||||
usz ia = a(b)->ia;
|
||||
|
||||
bool b0 = bp[0]&1;
|
||||
bool b0 = ia? bp[0]&1 : 0;
|
||||
bool both = false;
|
||||
for (usz i = 0; i < ia; i++) if (bitp_get(bp,i) != b0) { both=true; break; }
|
||||
|
||||
@ -99,7 +109,7 @@
|
||||
rp[i] = EXPR; \
|
||||
} \
|
||||
}
|
||||
#define DOI16(EXPR,A,W,X,BASE) { RI16(A) \
|
||||
#define DOI16(EXPR,A,W,X,BASE) { Ri16(A) \
|
||||
for (usz i = 0; i < ia; i++) { \
|
||||
i32 wv = W; i32 xv = X; i32 rv = EXPR; \
|
||||
if (RARE(rv!=(i16)rv)) { dec(r); goto BASE; } \
|
||||
@ -107,7 +117,7 @@
|
||||
} \
|
||||
dec(w); dec(x); return r; \
|
||||
}
|
||||
#define DOI8(EXPR,A,W,X,BASE) { RI8(A) \
|
||||
#define DOI8(EXPR,A,W,X,BASE) { Ri8(A) \
|
||||
for (usz i = 0; i < ia; i++) { \
|
||||
i16 wv = W; i16 xv = X; i16 rv = EXPR; \
|
||||
if (RARE(rv!=(i8)rv)) { dec(r); goto BASE; } \
|
||||
@ -115,7 +125,7 @@
|
||||
} \
|
||||
dec(w); dec(x); return r; \
|
||||
}
|
||||
#define DOI32(EXPR,A,W,X,BASE) { RI32(A) \
|
||||
#define DOI32(EXPR,A,W,X,BASE) { Ri32(A) \
|
||||
for (usz i = 0; i < ia; i++) { \
|
||||
i64 wv = W; i64 xv = X; i64 rv = EXPR; \
|
||||
if (RARE(rv!=(i32)rv)) { dec(r); goto BASE; } \
|
||||
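Editorial aside (not part of the diff): the DOI8/DOI16/DOI32 macros above all follow the same pattern. A rough scalar C equivalent of the i16 case, with hypothetical names, is sketched below; it is illustrative only, not the actual macro expansion.

#include <stdint.h>
#include <stddef.h>
// Compute each element in the next wider integer type, and bail out to a
// wider/slower path (the `goto BASE` above) as soon as a result does not
// round-trip through the narrow type.
static int add_i16_checked(int16_t* r, const int16_t* w, const int16_t* x, size_t ia) {
  for (size_t i = 0; i < ia; i++) {
    int32_t rv = (int32_t)w[i] + (int32_t)x[i]; // i32 wv/xv/rv, as in DOI16
    if (rv != (int16_t)rv) return 0;            // overflow: caller retries wider
    r[i] = (int16_t)rv;
  }
  return 1;
}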
@ -125,7 +135,6 @@
|
||||
}
|
||||
|
||||
static B bitAA0(B w, B x, usz ia) { UD; }
|
||||
static B bitAN0(B a, B b, usz ia) { UD; }
|
||||
static NOINLINE B bitAA1(B w, B x, usz ia) {
|
||||
u64* rp; B r = m_bitarrc(&rp, x);
|
||||
u64* wp=bitarr_ptr(w); u64* xp=bitarr_ptr(x);
|
||||
@ -138,20 +147,8 @@
|
||||
for (usz i=0; i<BIT_N(ia); i++) rp[i] = wp[i]&xp[i];
|
||||
dec(w); dec(x); return r;
|
||||
}
|
||||
static NOINLINE B bitAN1(B a, B b, usz ia) {
|
||||
u64* rp; B r = m_bitarrc(&rp, a);
|
||||
u64* ap=bitarr_ptr(a); u64 bv = bitx(b);
|
||||
for (usz i=0; i<BIT_N(ia); i++) rp[i] = ap[i]|bv;
|
||||
dec(a); return r;
|
||||
}
|
||||
static NOINLINE B bitAN2(B a, B b, usz ia) {
|
||||
u64* rp; B r = m_bitarrc(&rp, a);
|
||||
u64* ap=bitarr_ptr(a); u64 bv = bitx(b);
|
||||
for (usz i=0; i<BIT_N(ia); i++) rp[i] = ap[i]&bv;
|
||||
dec(a); return r;
|
||||
}
|
||||
|
||||
#define GC2i(SYMB, NAME, EXPR, EXTRA1, EXTRA2, BIT) \
|
||||
#define GC2i(SYMB, NAME, EXPR, EXTRA1, EXTRA2, BIT, SI_AA, DO_AS, DO_SA) \
|
||||
static NOINLINE B NAME##_c2_arr(B t, B w, B x); \
|
||||
B NAME##_c2(B t, B w, B x) { \
|
||||
if (isF64(w) & isF64(x)) {f64 wv=w.f,xv=x.f;return m_f64(EXPR);} \
|
||||
@ -173,54 +170,36 @@
|
||||
} \
|
||||
if ((we==el_i32|we==el_f64)&(xe==el_i32|xe==el_f64)) { \
|
||||
bool wei = we==el_i32; bool xei = xe==el_i32; \
|
||||
if (wei&xei) {PI32(w)PI32(x)DOI32(EXPR,w,wp[i],xp[i],aaB);}\
|
||||
aaB:; RF(x) \
|
||||
if (wei&xei) {PI32(w)PI32(x)SI_AA(NAME,i32,base) DOI32(EXPR,w,wp[i],xp[i],aaB);}\
|
||||
aaB:; Rf64(x) \
|
||||
if (wei) { PI32(w) \
|
||||
if (xei) { PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
|
||||
else { PF (x) DOF(EXPR,w,wp[i],xp[i]) } \
|
||||
} else { PF(w) \
|
||||
if (xei) { PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
|
||||
else { PF (x) DOF(EXPR,w,wp[i],xp[i]) } \
|
||||
else { PF (x) SI_AA(NAME,f64,base) DOF(EXPR,w,wp[i],xp[i]) }\
|
||||
} \
|
||||
dec(w); dec(x); return num_squeeze(r); \
|
||||
} \
|
||||
if(we==el_i8 & xe==el_i8 ) { PI8 (w) PI8 (x) DOI8 (EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i16 & xe==el_i16) { PI16(w) PI16(x) DOI16(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i8 & xe==el_i8 ) { PI8 (w) PI8 (x) SI_AA(NAME, i8,base) DOI8 (EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i16 & xe==el_i16) { PI16(w) PI16(x) SI_AA(NAME,i16,base) DOI16(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i8 & xe==el_i32) { PI8 (w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i32 & xe==el_i8 ) { PI32(w) PI8 (x) DOI32(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i16 & xe==el_i32) { PI16(w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i32 & xe==el_i16) { PI32(w) PI16(x) DOI32(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i16 & xe==el_i8 ) { PI16(w) PI8 (x) DOI16(EXPR,w,wp[i],xp[i],base); } \
|
||||
if(we==el_i8 & xe==el_i16) { PI8 (w) PI16(x) DOI16(EXPR,w,wp[i],xp[i],base); } \
|
||||
} else if (isF64(w)&isArr(x)) { usz ia = a(x)->ia; u8 xe = TI(x,elType); \
|
||||
if (xe==el_bit) { \
|
||||
if (BIT && q_fbit(w.f)) return bitAN##BIT(x,w,ia); \
|
||||
return bit_sel1Fn(NAME##_c2,w,x,1); \
|
||||
} \
|
||||
if (xe==el_i8 && q_i8 (w)) { PI8 (x) i8 wc=o2iu(w); DOI8 (EXPR,x,wc,xp[i],na8B ) } na8B :; \
|
||||
if (xe==el_i16 && q_i16(w)) { PI16(x) i16 wc=o2iu(w); DOI16(EXPR,x,wc,xp[i],na16B) } na16B:; \
|
||||
if (xe==el_i32 && q_i32(w)) { PI32(x) i32 wc=o2iu(w); DOI32(EXPR,x,wc,xp[i],na32B) } na32B:; \
|
||||
if (xe==el_i32) { RF(x) PI32(x) DOF(EXPR,w,w.f,xp[i]) dec(x); return num_squeeze(r); } \
|
||||
if (xe==el_f64) { RF(x) PF (x) DOF(EXPR,w,w.f,xp[i]) dec(x); return num_squeeze(r); } \
|
||||
} else if (isF64(x)&isArr(w)) { usz ia = a(w)->ia; u8 we = TI(w,elType); \
|
||||
if (we==el_bit) { \
|
||||
if (BIT && q_fbit(x.f)) return bitAN##BIT(w,x,ia); \
|
||||
else return bit_sel1Fn(NAME##_c2,w,x,0); \
|
||||
} \
|
||||
if (we==el_i8 && q_i8 (x)) { PI8 (w) i8 xc=o2iu(x); DOI8 (EXPR,w,wp[i],xc,an8B ) } an8B :; \
|
||||
if (we==el_i16 && q_i16(x)) { PI16(w) i16 xc=o2iu(x); DOI16(EXPR,w,wp[i],xc,an16B) } an16B:; \
|
||||
if (we==el_i32 && q_i32(x)) { PI32(w) i32 xc=o2iu(x); DOI32(EXPR,w,wp[i],xc,an32B) } an32B:; \
|
||||
if (we==el_i32) { RF(w) PI32(w) DOF(EXPR,x,wp[i],x.f) dec(w); return num_squeeze(r); } \
|
||||
if (we==el_f64) { RF(w) PF (w) DOF(EXPR,x,wp[i],x.f) dec(w); return num_squeeze(r); } \
|
||||
} \
|
||||
base: P2(NAME) \
|
||||
} \
|
||||
thrM(SYMB ": Unexpected argument types"); \
|
||||
} \
|
||||
else if (isF64(w)&isArr(x)) { usz ia=a(x)->ia; u8 xe=TI(x,elType); DO_SA(NAME,EXPR) } \
|
||||
else if (isF64(x)&isArr(w)) { usz ia=a(w)->ia; u8 we=TI(w,elType); DO_AS(NAME,EXPR) } \
|
||||
base: P2(NAME) \
|
||||
} \
|
||||
thrM(SYMB ": Unexpected argument types"); \
|
||||
}
|
||||
#else // if !TYPED_ARITH
|
||||
#define GC2i(SYMB, NAME, EXPR, EXTRA, BIT) B NAME##_c2(B t, B w, B x) { \
|
||||
#define GC2i(SYMB, NAME, EXPR, EXTRA1, EXTRA2, BIT, SI_AA, SI_AS, SI_SA) B NAME##_c2(B t, B w, B x) { \
|
||||
if (isF64(w) & isF64(x)) { f64 wv=w.f; f64 xv=x.f; return m_f64(EXPR); } \
|
||||
EXTRA \
|
||||
EXTRA1 EXTRA2 \
|
||||
P2(NAME) \
|
||||
thrM(SYMB ": Unexpected argument types"); \
|
||||
}
|
||||
@ -238,6 +217,53 @@ static f64 pfmod(f64 a, f64 b) {
|
||||
return r;
|
||||
}
|
||||
|
||||
#define NO_SI_AA(N,S,BASE)
|
||||
#define REG_SA(NAME, EXPR) \
|
||||
if (xe==el_bit) return bit_sel1Fn(NAME##_c2,w,x,1); \
|
||||
if (xe==el_i8 && q_i8 (w)) { PI8 (x) i8 wc=o2iu(w); DOI8 (EXPR,x,wc,xp[i],sa8B ) } sa8B :; \
|
||||
if (xe==el_i16 && q_i16(w)) { PI16(x) i16 wc=o2iu(w); DOI16(EXPR,x,wc,xp[i],sa16B) } sa16B:; \
|
||||
if (xe==el_i32 && q_i32(w)) { PI32(x) i32 wc=o2iu(w); DOI32(EXPR,x,wc,xp[i],sa32B) } sa32B:; \
|
||||
if (xe==el_f64) { Rf64(x) PF(x) DOF(EXPR,w,w.f,xp[i]) dec(x); return num_squeeze(r); }
|
||||
#define REG_AS(NAME, EXPR) \
|
||||
if (we==el_bit) return bit_sel1Fn(NAME##_c2,w,x,0); \
|
||||
if (we==el_i8 && q_i8 (x)) { PI8 (w) i8 xc=o2iu(x); DOI8 (EXPR,w,wp[i],xc,as8B ) } as8B :; \
|
||||
if (we==el_i16 && q_i16(x)) { PI16(w) i16 xc=o2iu(x); DOI16(EXPR,w,wp[i],xc,as16B) } as16B:; \
|
||||
if (we==el_i32 && q_i32(x)) { PI32(w) i32 xc=o2iu(x); DOI32(EXPR,w,wp[i],xc,as32B) } as32B:; \
|
||||
if (we==el_f64) { Rf64(w) PF(w) DOF(EXPR,x,wp[i],x.f) dec(w); return num_squeeze(r); }
|
||||
|
||||
#if SINGELI
|
||||
static void* tyany_ptr(B x) { // TODO extract to some header file
|
||||
u8 t = v(x)->type;
|
||||
return IS_SLICE(t)? c(TySlice,x)->a : c(TyArr,x)->a;
|
||||
}
|
||||
#define SI_AA(N,S,BASE) R##S(x); usz rlen=avx2_##N##AA##_##S((void*)wp, (void*)xp, (void*)rp, ia); if(RARE(rlen!=ia)) goto BASE; dec(w);dec(x);return r;
|
||||
#define SI_SA_I(N,S,W,BASE) R##S(x); usz rlen=avx2_##N##SA##_##S((W).u, (void*)xp, (void*)rp, ia); if(RARE(rlen!=ia)) goto BASE; dec(w);dec(x);return r;
|
||||
#define SI_AS_I(N,S,X,BASE) R##S(w); usz rlen=avx2_##N##AS##_##S((void*)wp, (X).u, (void*)rp, ia); if(RARE(rlen!=ia)) goto BASE; dec(w);dec(x);return r;
|
||||
#define SI_SA(NAME, EXPR) \
|
||||
void* xp = tyany_ptr(x); \
|
||||
switch(xe) { default: UD; \
|
||||
case el_bit: return bit_sel1Fn(NAME##_c2,w,x,1); \
|
||||
case el_i8 : { SI_SA_I(NAME, i8,w,saBad) } \
|
||||
case el_i16: { SI_SA_I(NAME,i16,w,saBad) } \
|
||||
case el_i32: { SI_SA_I(NAME,i32,w,saBad) } \
|
||||
case el_f64: { SI_SA_I(NAME,f64,w,saBad) } \
|
||||
case el_c8: case el_c16: case el_c32: case el_B:; /*fallthrough*/ \
|
||||
} saBad:;
|
||||
#define SI_AS(NAME, EXPR) \
|
||||
void* wp = tyany_ptr(w); \
|
||||
switch(we) { default: UD; \
|
||||
case el_bit: return bit_sel1Fn(NAME##_c2,w,x,0); \
|
||||
case el_i8 : { SI_AS_I(NAME, i8,x,asBad) } \
|
||||
case el_i16: { SI_AS_I(NAME,i16,x,asBad) } \
|
||||
case el_i32: { SI_AS_I(NAME,i32,x,asBad) } \
|
||||
case el_f64: { SI_AS_I(NAME,f64,x,asBad) } \
|
||||
case el_c8: case el_c16: case el_c32: case el_B:; /*fallthrough*/ \
|
||||
} asBad:;
|
||||
#else
|
||||
#define SI_AA NO_SI_AA
|
||||
#define SI_AS REG_AS
|
||||
#define SI_SA REG_SA
|
||||
#endif
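Editorial note (not in the diff): with SINGELI enabled, the SI_AA/SI_AS/SI_SA macros above hand whole arrays to the generated AVX2 kernels, which return how many elements they completed; a shortfall means an overflow was detected and control falls through to the existing scalar `base:` path. A minimal C sketch, where the kernel name and its (pointer, pointer, pointer, length) shape mirror the 'avx2_addAA_i32' export in dyarith.singeli but the exact types are approximate:

#include <stddef.h>
// Declaration of one generated kernel (shape taken from the arithAA export).
size_t avx2_addAA_i32(void* w, void* x, void* r, size_t len);

// Sketch of what SI_AA does around the kernel call.
static int simd_add_i32(void* wp, void* xp, void* rp, size_t ia) {
  size_t done = avx2_addAA_i32(wp, xp, rp, ia);
  return done == ia; // anything less: an i32 overflowed, use the fallback path
}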
|
||||
|
||||
GC2i("+", add, wv+xv, {
|
||||
if (isC32(w) & isF64(x)) { u64 r = (u64)(o2cu(w)+o2i64(x)); if(r>CHR_MAX)thrM("+: Invalid character"); return m_c32((u32)r); }
|
||||
@ -256,7 +282,8 @@ GC2i("+", add, wv+xv, {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
}, 0)
|
||||
}, 0, SI_AA, SI_AS, SI_SA)
|
||||
|
||||
GC2i("-", sub, wv-xv, {
|
||||
if (isC32(w) & isF64(x)) { u64 r = (u64)((i32)o2cu(w)-o2i64(x)); if(r>CHR_MAX)thrM("-: Invalid character"); return m_c32((u32)r); }
|
||||
if (isC32(w) & isC32(x)) return m_f64((i32)(u32)w.u - (i32)(u32)x.u);
|
||||
@ -284,16 +311,17 @@ GC2i("-", sub, wv-xv, {
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 0)
|
||||
}, 0, SI_AA, SI_AS, SI_SA)
|
||||
|
||||
GC2i("¬", not, 1+wv-xv, {
|
||||
if (isC32(w) & isF64(x)) { u64 r = (u64)(1+(i32)o2cu(w)-o2i64(x)); if(r>CHR_MAX)thrM("¬: Invalid character"); return m_c32((u32)r); }
|
||||
if (isC32(w) & isC32(x)) return m_f64(1 + (i32)(u32)w.u - (i32)(u32)x.u);
|
||||
}, {}, 0)
|
||||
GC2i("×", mul, wv*xv, {}, {}, 2)
|
||||
GC2i("∧", and, wv*xv, {}, {}, 2)
|
||||
GC2i("∨", or , (wv+xv)-(wv*xv), {}, {}, 1)
|
||||
GC2i("⌊", floor, wv>xv?xv:wv, {}, {}, 2) // optimizer optimizes out the fallback mess
|
||||
GC2i("⌈", ceil , wv>xv?wv:xv, {}, {}, 1)
|
||||
}, {}, 0, NO_SI_AA, REG_AS, REG_SA)
|
||||
GC2i("×", mul, wv*xv, {}, {}, 2, SI_AA, SI_AS, SI_SA)
|
||||
GC2i("∧", and, wv*xv, {}, {}, 2, NO_SI_AA, REG_AS, REG_SA)
|
||||
GC2i("∨", or , (wv+xv)-(wv*xv), {}, {}, 1, NO_SI_AA, REG_AS, REG_SA)
|
||||
GC2i("⌊", floor, wv>xv?xv:wv, {}, {}, 2, NO_SI_AA, REG_AS, REG_SA) // optimizer optimizes out the fallback mess
|
||||
GC2i("⌈", ceil , wv>xv?wv:xv, {}, {}, 1, NO_SI_AA, REG_AS, REG_SA)
|
||||
|
||||
GC2f("÷", div , w.f/x.f, {})
|
||||
GC2f("⋆", pow , pow(w.f, x.f), {})
|
||||
|
||||
src/h.h (2 changed lines)

@@ -487,7 +487,7 @@ typedef B (*M2C2)(Md2D*, B, B);
 #define IGetU(X,N) ({ Arr* x_ = a(X); TIv(x_,getU)(x_,N); })
 #define GetU(X,N) X##_getU(X##_arrU,N)
 #define SGet(X) Arr* X##_arr = a(X); AS2B X##_get = TIv(X##_arr,get);
-#define IGet(X,N)({ Arr* x_ = a(X); TIv(x_,get)(x_,N); })
+#define IGet(X,N) ({ Arr* x_ = a(X); TIv(x_,get)(x_,N); })
 #define Get(X,N) X##_get(X##_arr,N)
@ -5,29 +5,46 @@ def r_i2d{a:T} = emit{[4]f64, '_mm256_castsi256_pd', a}
|
||||
def r_f2d{a:T} = emit{[4]f64, '_mm256_castps_pd', a}
|
||||
def r_d2f{a:T} = emit{[8]f32, '_mm256_castpd_ps', a}
|
||||
def r_i2f{a:T} = emit{[8]f32, '_mm256_castsi256_ps', a}
|
||||
|
||||
# various utilities
|
||||
def isunsigned{T} = isint{T} & ~issigned{T}
|
||||
def isintv{T} = isint{eltype{T}}
|
||||
def isf64v{T} = f64==eltype{T}
|
||||
def isfloatv{T} = isfloat{eltype{T}}
|
||||
def issignedv{T} = issigned{eltype{T}}
|
||||
def isunsignedv{T} = isunsigned{eltype{T}}
|
||||
def w256{T} = width{T}==256
|
||||
def isintv{T,w} = isintv{T} & width{eltype{T}}==w
|
||||
# load
|
||||
def cast_vp{T, a & w256{T}} = emit{*T, '(void*)', a}
|
||||
def vload{a:T, n & w256{eltype{T}} & isintv{eltype{T}}} = emit{eltype{T}, '_mm256_loadu_si256', emit{T, 'op +', a, n}}
|
||||
def vload{a:T, n & w256{eltype{T}} & isfloatv{eltype{T}}} = r_i2d{vload{cast_vp{[4]u64, a}, n}} # TODO use the proper float load
|
||||
|
||||
def cast_vp{T, x & w256{T}} = emit{*T, '(void*)', x}
|
||||
def cast_v{R, x:S & w256{R} & w256{S} & isf64v{S} & isintv{R}} = r_d2i{R, x}
|
||||
def cast_v{R, x:S & w256{R} & w256{S} & isintv{S} & isf64v{R}} = r_i2d{x}
|
||||
def cast_v{R, x:S & w256{R} & w256{S} & isintv{S} & isintv{R}} = emit{R, '', x}
|
||||
def cast_v{R, x:S & w256{R} & w256{S} & isf64v{S} & isf64v{R}} = emit{R, '', x}
|
||||
def ty_vu{T & w256{T} & issignedv{T}} = [vcount{T}](ty_iu{eltype{T}})
|
||||
def ty_vs{T & w256{T} & isunsignedv{T}} = [vcount{T}](ty_is{eltype{T}})
|
||||
def forv{T & w256{T}} = forc{{v}=>cast_vp{T,v}}
|
||||
|
||||
# load & store
|
||||
def load {a:T, n & w256{eltype{T}} & isintv{eltype{T}}} = emit{eltype{T}, '_mm256_loadu_si256', emit{T, 'op +', a, n}}
|
||||
def loada{a:T, n & w256{eltype{T}} & isintv{eltype{T}}} = emit{eltype{T}, '_mm256_load_si256', emit{T, 'op +', a, n}}
|
||||
def load {a:T, n & w256{eltype{T}} & isf64v{eltype{T}}} = emit{eltype{T}, '_mm256_loadu_pd', cast_p{f64, emit{T, 'op +', a, n}}}
|
||||
def loada{a:T, n & w256{eltype{T}} & isf64v{eltype{T}}} = emit{eltype{T}, '_mm256_load_pd', cast_p{f64, emit{T, 'op +', a, n}}}
|
||||
def store {a:T, n, v & w256{eltype{T}} & isintv{eltype{T}}} = emit{void, '_mm256_storeu_si256', emit{T, 'op +', a, n}, v}
|
||||
def storea{a:T, n, v & w256{eltype{T}} & isintv{eltype{T}}} = emit{void, '_mm256_store_si256', emit{T, 'op +', a, n}, v}
|
||||
def store {a:T, n, v & w256{eltype{T}} & isf64v{eltype{T}}} = emit{void, '_mm256_storeu_pd', cast_p{f64, emit{T, 'op +', a, n}}, v}
|
||||
def storea{a:T, n, v & w256{eltype{T}} & isf64v{eltype{T}}} = emit{void, '_mm256_store_pd', cast_p{f64, emit{T, 'op +', a, n}}, v}
|
||||
|
||||
# broadcast; TODO restrict v
|
||||
def broadcast{T, v & w256{T} & isintv{T, 8}} = emit{T, '_mm256_set1_epi8', v}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 16}} = emit{T, '_mm256_set1_epi16', v}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 32}} = emit{T, '_mm256_set1_epi32', v}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 64}} = emit{T, '_mm256_set1_epi64x',v}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 8}} = emit{T, '_mm256_set1_epi8', ext{eltype{T},v}}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 16}} = emit{T, '_mm256_set1_epi16', ext{eltype{T},v}}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 32}} = emit{T, '_mm256_set1_epi32', ext{eltype{T},v}}
|
||||
def broadcast{T, v & w256{T} & isintv{T, 64}} = emit{T, '_mm256_set1_epi64x',ext{eltype{T},v}}
|
||||
def broadcast{T, v & w256{T} & eltype{T}==f64} = emit{T, '_mm256_set1_pd', v}
|
||||
|
||||
def __xor{a:T, b:T & w256{T} & isunsignedv{T}} = r_f2i{T, emit{[8]f32, '_mm256_xor_ps', r_i2f{a}, r_i2f{b}}}
|
||||
def __and{a:T, b:T & w256{T} & isunsignedv{T}} = r_f2i{T, emit{[8]f32, '_mm256_and_ps', r_i2f{a}, r_i2f{b}}}
|
||||
def __or {a:T, b:T & w256{T} & isunsignedv{T}} = r_f2i{T, emit{[8]f32, '_mm256_or_ps', r_i2f{a}, r_i2f{b}}}
|
||||
def __xor{a:T, b:T & w256{T} & isintv{T}} = r_f2i{T, emit{[8]f32, '_mm256_xor_ps', r_i2f{a}, r_i2f{b}}}
|
||||
def __and{a:T, b:T & w256{T} & isintv{T}} = r_f2i{T, emit{[8]f32, '_mm256_and_ps', r_i2f{a}, r_i2f{b}}}
|
||||
def __or {a:T, b:T & w256{T} & isintv{T}} = r_f2i{T, emit{[8]f32, '_mm256_or_ps', r_i2f{a}, r_i2f{b}}}
|
||||
|
||||
def __not{a:T & w256{T} & isunsignedv{T}} = a ^ broadcast{T, ~cast{eltype{T},0}}
|
||||
|
||||
@ -40,5 +57,11 @@ def __ge{a:T,b:T & T==[4]f64} = fcmpAVX{a,b,29}
|
||||
def __lt{a:T,b:T & T==[4]f64} = fcmpAVX{a,b,17}
|
||||
def __le{a:T,b:T & T==[4]f64} = fcmpAVX{a,b,18}
|
||||
|
||||
def getmask{x:[8]u32} = emit{u8, '_mm256_movemask_ps', r_i2f{x}}
|
||||
def getmask{x:[4]u64} = emit{u8, '_mm256_movemask_pd', r_i2d{x}}
|
||||
# minimal f32 things
|
||||
def abs{a:[8]f32} = emit{[8]f32, '_mm256_and_ps', a, r_i2f{broadcast{[8]u32, 0x7FFFFFFF}}}
|
||||
|
||||
|
||||
def getmask{x:T & w256{T} & 32==width{eltype{T}}} = emit{u8, '_mm256_movemask_ps', r_i2f{x}}
|
||||
def getmask{x:T & w256{T} & 64==width{eltype{T}}} = emit{u8, '_mm256_movemask_pd', r_i2d{x}}
|
||||
def any{x:T & w256{T} & isintv{T}} = getmask{x}!=0 # assumes elements of x all have equal bits (avx2 utilizes this for 16 bits)
|
||||
def anyneg{x:T & w256{T} & issignedv{T}} = getmask{x}!=0
|
||||
@ -10,9 +10,11 @@ def max{a:T,b:T & T==[ 8]i32} = emit{T, '_mm256_max_epi32', a, b}; def max{a:T,b
|
||||
def __eq{a:T,b:T & T==[32]i8 } = emit{[32]u8, '_mm256_cmpeq_epi8', a, b}
|
||||
def __eq{a:T,b:T & T==[16]i16} = emit{[16]u16, '_mm256_cmpeq_epi16', a, b}
|
||||
def __eq{a:T,b:T & T==[ 8]i32} = emit{[ 8]u32, '_mm256_cmpeq_epi32', a, b}
|
||||
def __eq{a:T,b:T & T==[ 4]i64} = emit{[ 4]u64, '_mm256_cmpeq_epi64', a, b}
|
||||
def __gt{a:T,b:T & T==[32]i8 } = emit{[32]u8, '_mm256_cmpgt_epi8', a, b}
|
||||
def __gt{a:T,b:T & T==[16]i16} = emit{[16]u16, '_mm256_cmpgt_epi16', a, b}
|
||||
def __gt{a:T,b:T & T==[ 8]i32} = emit{[ 8]u32, '_mm256_cmpgt_epi32', a, b}
|
||||
def __gt{a:T,b:T & T==[ 4]i64} = emit{[ 4]u64, '_mm256_cmpgt_epi64', a, b}
|
||||
def __lt{a:T,b:T & w256{T} & issignedv{T}} = b>a
|
||||
def __ge{a:T,b:T & w256{T} & issignedv{T}} = ~(b>a)
|
||||
def __le{a:T,b:T & w256{T} & issignedv{T}} = ~(a>b)
|
||||
@ -20,6 +22,7 @@ def __le{a:T,b:T & w256{T} & issignedv{T}} = ~(a>b)
|
||||
def __eq{a:T,b:T & T==[32]u8 } = emit{[32]u8, '_mm256_cmpeq_epi8', a, b}
|
||||
def __eq{a:T,b:T & T==[16]u16} = emit{[16]u16, '_mm256_cmpeq_epi16', a, b}
|
||||
def __eq{a:T,b:T & T==[ 8]u32} = emit{[ 8]u32, '_mm256_cmpeq_epi32', a, b}
|
||||
def __eq{a:T,b:T & T==[ 4]u64} = emit{[ 4]u64, '_mm256_cmpeq_epi64', a, b}
|
||||
def __le{a:T,b:T & w256{T} & isunsignedv{T}} = a==min{a,b}
|
||||
def __ge{a:T,b:T & w256{T} & isunsignedv{T}} = a==max{a,b}
|
||||
def __lt{a:T,b:T & w256{T} & isunsignedv{T}} = ~(a>=b)
|
||||
@ -27,8 +30,60 @@ def __gt{a:T,b:T & w256{T} & isunsignedv{T}} = ~(a<=b)
|
||||
# rest of comparison
|
||||
def __ne{a:T,b:T & w256{T} & isintv{T}} = ~(b==a)
|
||||
|
||||
def getmask{x:[32]u8} = emit{u32, '_mm256_movemask_epi8', x}
|
||||
def getmask{x:[16]u16} = {
|
||||
# shift
|
||||
def __shl{a:T,b & w256{T} & isintv{T} & 16==width{eltype{T}}} = emit{T, '_mm256_slli_epi16', a, b}
|
||||
def __shl{a:T,b & w256{T} & isintv{T} & 32==width{eltype{T}}} = emit{T, '_mm256_slli_epi32', a, b}
|
||||
def __shl{a:T,b & w256{T} & isintv{T} & 64==width{eltype{T}}} = emit{T, '_mm256_slli_epi64', a, b}
|
||||
def __shr{a:T,b & w256{T} & u16==eltype{T}} = emit{T, '_mm256_srli_epi16', a, b}
|
||||
def __shr{a:T,b & w256{T} & u32==eltype{T}} = emit{T, '_mm256_srli_epi32', a, b}
|
||||
def __shr{a:T,b & w256{T} & u64==eltype{T}} = emit{T, '_mm256_srli_epi64', a, b}
|
||||
def __shr{a:T,b & w256{T} & i16==eltype{T}} = emit{T, '_mm256_srai_epi16', a, b}
|
||||
def __shr{a:T,b & w256{T} & i32==eltype{T}} = emit{T, '_mm256_srai_epi32', a, b}
|
||||
# no 64-bit arithmetic shift :/
|
||||
|
||||
# questionable pack/unpack
|
||||
def unpackQ{a:[32]i8, b:[32]i8 } = { tup{emit{[16]i16, '_mm256_unpacklo_epi8', a, b}, emit{[16]i16, '_mm256_unpackhi_epi8', a, b}}}
|
||||
def unpackQ{a:[16]i16, b:[16]i16} = { tup{emit{[ 8]i32, '_mm256_unpacklo_epi16', a, b}, emit{[ 8]i32, '_mm256_unpackhi_epi16', a, b}}}
|
||||
def unpackQ{a:[ 8]i32, b:[ 8]i32} = { tup{emit{[ 4]i64, '_mm256_unpacklo_epi32', a, b}, emit{[ 4]i64, '_mm256_unpackhi_epi32', a, b}}}
|
||||
# saturate the argument
|
||||
def packQ{a:T,b:T & T==[16]i16} = emit{[32]i8, '_mm256_packs_epi16', a, b}
|
||||
def packQ{a:T,b:T & T==[ 8]i32} = emit{[16]i16, '_mm256_packs_epi32', a, b}
|
||||
def packQ{a} = packQ{tupsel{0,a}, tupsel{1,a}}
|
||||
# super questionable pack - takes assumes high halves are zero
|
||||
def packQQ{a:T,b:T & T==[4]i64} = emit{[8]i32, '_mm256_shuffle_epi32', a, 4b1120} | emit{[8]i32, '_mm256_shuffle_epi32', b, 4b2011}
|
||||
def packQQ{a} = packQQ{tupsel{0,a}, tupsel{1,a}}
|
||||
|
||||
# arith
|
||||
def __adds{a:T,b:T & T==[16]i16} = emit{T, '_mm256_adds_epi16', a, b}
|
||||
def __adds{a:T,b:T & T==[16]u16} = emit{T, '_mm256_adds_epu16', a, b}
|
||||
def __adds{a:T,b:T & T==[32]i8 } = emit{T, '_mm256_adds_epi8', a, b}
|
||||
def __adds{a:T,b:T & T==[32]u8 } = emit{T, '_mm256_adds_epu8', a, b}
|
||||
|
||||
def __subs{a:T,b:T & T==[16]i16} = emit{T, '_mm256_subs_epi16', a, b}
|
||||
def __subs{a:T,b:T & T==[16]u16} = emit{T, '_mm256_subs_epu16', a, b}
|
||||
def __subs{a:T,b:T & T==[32]i8 } = emit{T, '_mm256_subs_epi8', a, b}
|
||||
def __subs{a:T,b:T & T==[32]u8 } = emit{T, '_mm256_subs_epu8', a, b}
|
||||
|
||||
def __add{a:T,b:T & w256{T} & isintv{T} & 8==width{eltype{T}}} = emit{T, '_mm256_add_epi8', a, b}
|
||||
def __add{a:T,b:T & w256{T} & isintv{T} & 16==width{eltype{T}}} = emit{T, '_mm256_add_epi16', a, b}
|
||||
def __add{a:T,b:T & w256{T} & isintv{T} & 32==width{eltype{T}}} = emit{T, '_mm256_add_epi32', a, b}
|
||||
def __add{a:T,b:T & w256{T} & isintv{T} & 64==width{eltype{T}}} = emit{T, '_mm256_add_epi64', a, b}
|
||||
|
||||
def __sub{a:T,b:T & w256{T} & isintv{T} & 8==width{eltype{T}}} = emit{T, '_mm256_sub_epi8', a, b}
|
||||
def __sub{a:T,b:T & w256{T} & isintv{T} & 16==width{eltype{T}}} = emit{T, '_mm256_sub_epi16', a, b}
|
||||
def __sub{a:T,b:T & w256{T} & isintv{T} & 32==width{eltype{T}}} = emit{T, '_mm256_sub_epi32', a, b}
|
||||
def __sub{a:T,b:T & w256{T} & isintv{T} & 64==width{eltype{T}}} = emit{T, '_mm256_sub_epi64', a, b}
|
||||
|
||||
def __mul {a:T,b:T & [16]i16==T} = emit{T, '_mm256_mullo_epi16', a, b}
|
||||
def __mulhi{a:T,b:T & [16]i16==T} = emit{T, '_mm256_mulhi_epi16', a, b}
|
||||
def __mul {a:T,b:T & [ 8]i32==T} = emit{T, '_mm256_mullo_epi32', a, b}
|
||||
def __mul {a:T,b:T & [ 8]u32==T} = emit{T, '_mm256_mullo_epu32', a, b}
|
||||
def __mul32{a:T,b:T & [ 4]i64==T} = emit{T, '_mm256_mul_epi32', a, b} # reads only low 32 bits of the arguments
|
||||
|
||||
def getmask{x:T & w256{T} & 8==width{eltype{T}}} = emit{u32, '_mm256_movemask_epi8', x}
|
||||
def getmask{x:T & w256{T} & 16==width{eltype{T}}} = {
|
||||
msk:u32 = getmask{emit{[32]u8, '_mm256_packs_epi16', x, broadcast{[16]u16, 0}}}
|
||||
(msk&255) | (msk>>8) # TODO try out permute4x64 for fixing up
|
||||
}
|
||||
}
|
||||
def any{x:T & w256{T} & isintv{T} & 16==width{eltype{T}}} = getmask{cast_v{[32]u8,x}}!=0
|
||||
def anyneg{x:T & w256{T} & 16==width{eltype{T}}} = getmask{cast_v{[32]u8, cast_v{[16]i16,x} < broadcast{[16]i16, 0}}}!=0
|
||||
|
||||
@ -1,10 +1,25 @@
|
||||
include 'skin/c'
|
||||
include 'arch/c'
|
||||
|
||||
# TODO move these to a more base file
|
||||
def trunc{T, x:U & isint{T} & isint{U} & T<=U} = emit{T, '', x}
|
||||
def ext {T, x:U & isint{T} & isint{U} & T>=U} = emit{T, '', x}
|
||||
def trunc{T, x & match{'number',kind{x}}} = cast{T, x}
|
||||
def ext {T, x & match{'number',kind{x}}} = cast{T, x}
|
||||
|
||||
def cdiv{a,b} = (a+b-1)/b
|
||||
def rare{x:u1} = emit{u1, '__builtin_expect', x, 0}
|
||||
def isunsigned{T} = isint{T} & ~issigned{T}
|
||||
def assert{x:u1} = emit{void, 'si_assert', x}
|
||||
|
||||
def cast_p{T, x} = emit{*T, '(void*)', x}
|
||||
|
||||
|
||||
def ty_iu{T & T==i8 } = u8; def ty_is{T & T==i8 } = u8
|
||||
def ty_iu{T & T==i16} = u16; def ty_is{T & T==i16} = u16
|
||||
def ty_iu{T & T==i32} = u32; def ty_is{T & T==i32} = u32
|
||||
def ty_iu{T & T==i64} = u64; def ty_is{T & T==i64} = u64
|
||||
|
||||
def unroll{vars,begin,end,block & match{kind{begin},'number'} & match{kind{end},'number'}} = {
|
||||
def f{i,l & i==l} = 0
|
||||
def f{i,l & i!=l} = {
|
||||
@ -20,6 +35,14 @@ def for{vars,begin,end,block} = {
|
||||
i = i+1
|
||||
}
|
||||
}
|
||||
def forc{F} {vars,begin,end,block} = {
|
||||
i:u64 = begin
|
||||
while (i < end) {
|
||||
exec{i, each{F, vars}, block}
|
||||
i = i+1
|
||||
}
|
||||
}
|
||||
|
||||
def maxvalue{T & T==u8 } = 0xff
|
||||
def maxvalue{T & T==u16} = 0xffff
|
||||
def maxvalue{T & T==u32} = 0xffffffff
|
||||
@ -28,4 +51,11 @@ def maxvalue{T & T==u32} = 0xffffffff
|
||||
def isintv{T} = isint{eltype{T}}
|
||||
def isfloatv{T} = isfloat{eltype{T}}
|
||||
def issignedv{T} = issigned{eltype{T}}
|
||||
def isunsignedv{T} = isunsigned{eltype{T}}
|
||||
def isunsignedv{T} = isunsigned{eltype{T}}
|
||||
def isvec{T} = match{typekind{T},'vector'}
|
||||
|
||||
|
||||
# non-vector variants of vector defs
|
||||
def broadcast{T, v & match{typekind{T},'primitive'}} = v
|
||||
def any{v:T & match{typekind{T},'primitive'}} = v
|
||||
def anyneg{v:T & match{typekind{T},'primitive'}} = v<0
|
||||
@ -11,7 +11,7 @@ def b_set{x:*u64, n:(Size), v:u1} = {
|
||||
else store{x,n>>6,p & ~m}
|
||||
}
|
||||
|
||||
def b_set{sz, x:*u64, n:(Size), v} = { vc:u64 = cast_i{u64,v}
|
||||
def b_set{sz, x:*u64, n:(Size), v} = { vc:u64 = ext{u64,v}
|
||||
am:u64 = 64/sz;
|
||||
w:u64 = load{x,n/am}
|
||||
sh:u64 = (n&(am-1)) * sz
|
||||
@ -21,7 +21,7 @@ def b_set{sz, x:*u64, n:(Size), v} = { vc:u64 = cast_i{u64,v}
|
||||
}
|
||||
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==4} = {
|
||||
x8:= cast_p{*u8, x}
|
||||
x8:= cast_p{u8, x}
|
||||
|
||||
#w:u64 = cast_i{u64, load{x8,n/2}}
|
||||
#sh:u64 = (n&1) * 4
|
||||
@ -30,16 +30,16 @@ def b_set{sz, x:*u64, n:(Size), v & sz==4} = {
|
||||
|
||||
w:u8 = load{x8,n/2}
|
||||
if ((n&1)==1) {
|
||||
w = w & ~(cast {u8,15}<<4)
|
||||
w = w & ~(cast{u8,15}<<4)
|
||||
w = w | (cast_i{u8,v}<<4)
|
||||
} else {
|
||||
w = w & ~(cast {u8,15})
|
||||
w = w & ~(cast{u8,15})
|
||||
w = w | (cast_i{u8,v}<<0)
|
||||
}
|
||||
|
||||
store{x8, n/2, cast_i{u8,w}}
|
||||
}
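For reference (not part of the diff), the sz==4 case above packs two results per byte; a plain C rendering of the same nibble update is:

#include <stdint.h>
// Element n lives in byte n/2; n&1 selects the high or low nibble.
// v is assumed to already fit in 4 bits, matching the Singeli code above.
static void nibble_set(uint8_t* x8, uint64_t n, uint8_t v) {
  uint8_t w = x8[n/2];
  if (n & 1) w = (uint8_t)((w & 0x0F) | (uint8_t)(v << 4));
  else       w = (uint8_t)((w & 0xF0) | v);
  x8[n/2] = w;
}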
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz== 8} = store{cast_p{*u8, x}, n, cast_i{u8, v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==16} = store{cast_p{*u16, x}, n, cast_i{u16,v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==32} = store{cast_p{*u32, x}, n, cast_i{u32,v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==64} = store{x, n, cast_i{u64,v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz== 8} = store{cast_p{u8, x}, n, cast_i{u8, v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==16} = store{cast_p{u16, x}, n, cast_i{u16,v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==32} = store{cast_p{u32, x}, n, cast_i{u32,v}}
|
||||
def b_set{sz, x:*u64, n:(Size), v & sz==64} = store{x, n, cast_i{u64,v}}
|
||||
|
||||
@@ -1,3 +1,4 @@
+def Size = u64
 def bcall{T, f, x} = emit{T, 'BCALL', f, x}
 def from_B{T, x & T==f64} = bcall{T, 'o2fu', x}
 def from_B{T, x & T<=i32 & issigned{T}} = bcall{T, 'o2iu', x}
@@ -1,7 +1,6 @@
-def Size = u64
 include './base'
-include './f64'
 include './cbqnDefs'
+include './f64'
 include './avx'
 include './avx2'
 include './bitops'
@ -31,15 +30,15 @@ def eqne{op} = match{op,__eq}|match{op,__ne}
|
||||
def pathAS{dst, len, T, op, x & issigned{T}} = {
|
||||
def R{f & eqne{op}} = {
|
||||
if (rare{floor{f}!=f}) fillbits{dst, len, op{0,1}, x} # also includes check for NaN/sNaN
|
||||
ftrunc_i64{f}
|
||||
ftrunc{i64,f}
|
||||
}
|
||||
def R{f & match{op,__lt}|match{op,__ge}} = ftrunc_i64{ceil{f}}
|
||||
def R{f & match{op,__gt}|match{op,__le}} = ftrunc_i64{floor{f}}
|
||||
def R{f & match{op,__lt}|match{op,__ge}} = ftrunc{i64,ceil{f}}
|
||||
def R{f & match{op,__gt}|match{op,__le}} = ftrunc{i64,floor{f}}
|
||||
|
||||
xf:f64 = interp_f64{x}
|
||||
xi64:i64 = R{xf}
|
||||
xT:T = cast_i{T, xi64}
|
||||
if (rare{(cast_i{i64, xT}!=xi64)}) {
|
||||
xT:T = trunc{T, xi64}
|
||||
if (rare{ext{i64, xT}!=xi64}) {
|
||||
cif{~eqne{op}, {_}=>{ # NaN was already checked for ≠/=
|
||||
if (isNaN{xf}) { call{cmpIX, dst, len, x, op{0,1}}; return{}; }
|
||||
}}
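Aside (not in the diff): the trunc/ext pair introduced here replaces the old cast_i round trip; the check itself is the usual narrow-then-widen comparison, roughly the following in scalar C (the i8 element type is just an example):

#include <stdint.h>
// Narrow a 64-bit comparison operand to the array's element type and verify
// it survives the round trip; if it does not, the comparison result is the
// same for every element and the caller fills the destination bits directly.
static int narrows_to_i8(int64_t xi64, int8_t* out) {
  int8_t xT = (int8_t)xi64;          // trunc{T, xi64}
  if ((int64_t)xT != xi64) return 0; // ext{i64, xT} != xi64
  *out = xT;
  return 1;
}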
|
||||
@ -63,7 +62,7 @@ def pathAS{dst, len, T, op, x & isunsigned{T}} = {
|
||||
}
|
||||
xc32:u32 = from_B{u32,x}
|
||||
if (xc32 > maxvalue{T}) fillbits{dst, len, op{0,1}}
|
||||
cast_i{T, xc32}
|
||||
trunc{T, xc32}
|
||||
}
|
||||
|
||||
|
||||
@ -76,27 +75,27 @@ def any2bit{VT, unr, op, wS, wV, xS, xV, dst:*u64, len:(Size)} = {
|
||||
assert{am>0}
|
||||
while (ri < am) {
|
||||
r:u64 = 0
|
||||
@unroll (j from 0 to unr) r = r | (cast_i{u64, getmask{op{wV{xi+j}, xV{xi+j}}}} << (j*vcount{VT}))
|
||||
@unroll (j from 0 to unr) r = r | (ext{u64, getmask{op{wV{xi+j}, xV{xi+j}}}} << (j*vcount{VT}))
|
||||
b_set{bam, dst, ri, r}
|
||||
xi = xi+unr
|
||||
ri = ri+1
|
||||
}
|
||||
}
|
||||
aa2bit{VT, unr, op}(dst:*u64, wr:*u8, xr:*u8, len:Size) : void = {
|
||||
wv:= cast_vp{VT, wr}; ws:= cast_p{*eltype{VT}, wr}
|
||||
xv:= cast_vp{VT, xr}; xs:= cast_p{*eltype{VT}, xr}
|
||||
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>vload{wv,i}, {i}=>load{xs,i}, {i}=>vload{xv,i}, dst, len}
|
||||
wv:= cast_vp{VT, wr}; ws:= cast_p{eltype{VT}, wr}
|
||||
xv:= cast_vp{VT, xr}; xs:= cast_p{eltype{VT}, xr}
|
||||
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>load{wv,i}, {i}=>load{xs,i}, {i}=>load{xv,i}, dst, len}
|
||||
}
|
||||
|
||||
as2bit{VT, unr, op}(dst:*u64, wr:*u8, x:u64, len:Size) : void = { # show{VT,unr,fmt{op}}
|
||||
wv:= cast_vp{VT, wr}; ws:= cast_p{*eltype{VT}, wr}
|
||||
wv:= cast_vp{VT, wr}; ws:= cast_p{eltype{VT}, wr}
|
||||
xv:= broadcast{VT, pathAS{dst, len, eltype{VT}, op, x}}
|
||||
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>vload{wv,i}, {i}=>x, {i}=>xv, dst, len}
|
||||
any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>load{wv,i}, {i}=>x, {i}=>xv, dst, len}
|
||||
}
|
||||
|
||||
bitAA{bitop}(dst:*u64, wr:*u8, xr:*u8, len:Size) : void = {
|
||||
ws:= cast_p{*u64, wr}
|
||||
xs:= cast_p{*u64, xr}
|
||||
ws:= cast_p{u64, wr}
|
||||
xs:= cast_p{u64, xr}
|
||||
@for (dst,ws,xs over _ from 0 to cdiv{len,64}) dst = bitop{ws,xs}
|
||||
}
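Editorial aside: bitAA is the whole-word boolean case; each 64-bit word of the two packed bit arrays is combined directly, cdiv{len,64} words in total. In C terms (illustrative, with AND standing in for the generic bitop):

#include <stdint.h>
#include <stddef.h>
// Word-at-a-time boolean combination of two packed bit arrays;
// (len+63)/64 mirrors cdiv{len,64} above.
static void bit_and_arrays(uint64_t* dst, const uint64_t* w, const uint64_t* x, size_t len) {
  for (size_t i = 0; i < (len + 63) / 64; i++) dst[i] = w[i] & x[i];
}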
|
||||
|
||||
@ -115,8 +114,8 @@ bitAS{op}(dst:*u64, wr:*u8, x:u64, len:Size) : void = { # show{'bitAS'}
|
||||
fillbits{dst, len, r0}
|
||||
return{}
|
||||
}
|
||||
if (r0) call{not, dst, cast_p{*u64,wr}, len}
|
||||
else call{cpy, dst, cast_p{*u64,wr}, len}
|
||||
if (r0) call{not, dst, cast_p{u64,wr}, len}
|
||||
else call{cpy, dst, cast_p{u64,wr}, len}
|
||||
}
|
||||
|
||||
|
||||
|
||||
src/singeli/src/dyarith.singeli (new file, 123 lines)

@@ -0,0 +1,123 @@
|
||||
include './base'
|
||||
include './f64'
|
||||
include './cbqnDefs'
|
||||
include './avx'
|
||||
include './avx2'
|
||||
include './bitops'
|
||||
|
||||
|
||||
def rootty{T & match{typekind{T},'primitive'}} = T
|
||||
def rootty{T & match{typekind{T},'vector'}} = eltype{T}
|
||||
|
||||
# TODO more to some more headerlike file
|
||||
def ty_dbl{T & i8==T} = i16
|
||||
def ty_dbl{T & i16==T} = i32
|
||||
def ty_dbl{T & i32==T} = i64
|
||||
def ty_dbl{T & isvec{T}} = [vcount{T}/2](ty_dbl{eltype{T}})
|
||||
def dcast_i{x} = ext{ty_dbl{type{x}}, x}
|
||||
|
||||
# + & -
|
||||
def arithChk1{F, w:T, x:T, r:T & match{F,__add}} = anyneg{(w^r) & (x^r)}
|
||||
def arithChk1{F, w:T, x:T, r:T & match{F,__sub}} = anyneg{(w^x) & (w^r)}
|
||||
def arithChk1{F, w:T, x:T, r:T & match{F,__add} & isvec{T} & width{eltype{T}}<=16} = any{__adds{w,x}!=r}
|
||||
def arithChk1{F, w:T, x:T, r:T & match{F,__sub} & isvec{T} & width{eltype{T}}<=16} = any{__subs{w,x}!=r}
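Editorial note: the first two arithChk1 definitions are the classic sign-bit overflow tests, while the ≤16-bit vector variants instead compare a saturating add/subtract against the wrapping result. The sign-bit form, written out in scalar C (two's complement assumed, as the vector code does):

#include <stdint.h>
// w+x overflowed iff the result's sign differs from both inputs' signs,
// i.e. (w^r)&(x^r) is negative; w-x overflowed iff the inputs differ in sign
// and the result's sign differs from w's, i.e. (w^x)&(w^r) is negative.
static int add_overflows(int32_t w, int32_t x) {
  int32_t r = (int32_t)((uint32_t)w + (uint32_t)x); // wrapping add
  return ((w ^ r) & (x ^ r)) < 0;
}
static int sub_overflows(int32_t w, int32_t x) {
  int32_t r = (int32_t)((uint32_t)w - (uint32_t)x); // wrapping subtract
  return ((w ^ x) & (w ^ r)) < 0;
}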
|
||||
|
||||
def arithChk2{F, w:T, x:T, i & issigned{rootty{T}}} = {
|
||||
r:= F{w,x}
|
||||
tup{r, arithChk1{F, w, x, r}}
|
||||
}
|
||||
|
||||
# ×/∧
|
||||
def arithChk2{F, w:T, x:T, i & match{F,__mul} & match{typekind{T},'primitive'}} = {
|
||||
r:= F{dcast_i{w}, dcast_i{x}}
|
||||
tup{r, r!=ext{type{r}, trunc{T, r}}}
|
||||
}
|
||||
|
||||
def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i16==eltype{T}} = {
|
||||
rl:= __mul {w,x}
|
||||
rh:= __mulhi{w,x}
|
||||
tup{rl, any{rh != rl>>15}}
|
||||
}
|
||||
def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
|
||||
def wp = unpackQ{w, cast_v{T,broadcast{T,0}>w}}
|
||||
def xp = unpackQ{x, cast_v{T,broadcast{T,0}>x}}
|
||||
def rp = each{__mul, wp, xp}
|
||||
def bad = each{{v}=>(v<<8)>>8 != v, rp}
|
||||
tup{packQ{rp}, any{tupsel{0,bad}|tupsel{1,bad}}}
|
||||
}
|
||||
def arithChk2{F, w:T, x:T, i & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
|
||||
max:= broadcast{[8]u32, 0x4efffffe}
|
||||
def cf32{x} = emit{[8]u32, '_mm256_cvtepi32_ps', x}
|
||||
f32mul:= emit{[8]f32, '_mm256_mul_ps', cf32{w}, cf32{x}}
|
||||
tup{w*x, any{emit{[8]u32, '_mm256_cmp_ps', abs{f32mul}, max, 29}}}
|
||||
# TODO fallback to the below if the above fails
|
||||
# def wp = unpackQ{w, broadcast{T, 0}}
|
||||
# def xp = unpackQ{x, broadcast{T, 0}}
|
||||
# def rp = each{__mul32, wp, xp}
|
||||
# def T2 = ty_dbl{T}
|
||||
# def bad = each{{v}=>{
|
||||
# ((cast_v{T2,v} + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
|
||||
# }, rp}
|
||||
# tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}}
|
||||
}
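Aside (not in the diff): the i32 multiply check above is a conservative filter. The products are also formed in f32, and any magnitude reaching the 0x4efffffe threshold (an f32 value just below 2^31) sends the whole block to the fallback path, which this commit still leaves as a TODO; the margin below 2^31 leaves room for f32 rounding error, so genuine overflows should not slip through while some in-range products near the boundary get flagged. A scalar model of the idea:

#include <math.h>
#include <stdint.h>
// Conservative: may flag products that would still fit in i32, so a flag only
// means "fall back to an exact path", never a wrong fast-path result.
static int mul_i32_may_overflow(int32_t w, int32_t x) {
  float p = (float)w * (float)x;      // f32 product, as _mm256_mul_ps does
  return fabsf(p) >= 2147483392.0f;   // value of the 0x4efffffe bit pattern
}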
|
||||
|
||||
|
||||
# f64
|
||||
def arithChk3{F, w:T, x:T, i} = {
|
||||
def r2 = arithChk2{F, w, x, i}
|
||||
if (rare{tupsel{1,r2}}) return{i}
|
||||
tupsel{0,r2}
|
||||
}
|
||||
|
||||
def arithChk3{F, w:T, x:T, i & f64==rootty{T}} = F{w,x}
|
||||
|
||||
def arithAA{VT, F, w, x, r, len} = {
|
||||
def bam = vcount{VT}
|
||||
def vv = len/bam
|
||||
@forv{VT} (w,x,r over i from 0 to vv) r = arithChk3{F, w, x, i*bam}
|
||||
@for (w,x,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
|
||||
len
|
||||
}
|
||||
def arithAS{VT, F, w, x, r, len} = {
|
||||
def bam = vcount{VT}
|
||||
def vv = len/bam
|
||||
xv:= broadcast{VT, x}
|
||||
@forv{VT} (w,r over i from 0 to vv) r = arithChk3{F, w, xv, i*bam}
|
||||
@for (w,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
|
||||
len
|
||||
}
|
||||
def arithSA{VT, F, w, x, r, len} = {
|
||||
def bam = vcount{VT}
|
||||
def vv = len/bam
|
||||
wv:= broadcast{VT, w}
|
||||
@forv{VT} (x,r over i from 0 to vv) r = arithChk3{F, wv, x, i*bam}
|
||||
@for (x,r over i from vv*bam to len) r = arithChk3{F, w, x, i}
|
||||
len
|
||||
}
|
||||
|
||||
|
||||
# cast a guaranteed float to a more specific type; return{0} if not possible
|
||||
def cast_fB{T, x:(u64) & f64==T} = from_B{f64, x}
|
||||
def cast_fB{T, x:(u64) & issigned{T} & T<i64} = {
|
||||
f:f64 = from_B{f64, x}
|
||||
r:T = ftrunc{T, f}
|
||||
if (rare{f!=fext{r}}) return{cast{Size,0}}
|
||||
r
|
||||
}
|
||||
|
||||
arithAA{F,VT}(w: *u8, x: *u8, r: *u8, len: Size) : Size = { def c{x}=cast_p{eltype{VT}, x}; arithAA{VT, F, c{w}, c{x}, c{r}, len} }
|
||||
arithAS{F,VT}(w: *u8, x: u64, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithAS{VT, F, cast_p {T, w}, cast_fB{T, x}, cast_p{T, r}, len} }
|
||||
arithSA{F,VT}(w: u64, x: *u8, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithSA{VT, F, cast_fB{T, w}, cast_p {T, x}, cast_p{T, r}, len} }
|
||||
|
||||
'avx2_addAA_i8' = arithAA{__add,[32]i8 }; 'avx2_addAS_i8' = arithAS{__add,[32]i8 }; 'avx2_addSA_i8' = arithSA{__add,[32]i8 }
|
||||
'avx2_addAA_i16' = arithAA{__add,[16]i16}; 'avx2_addAS_i16' = arithAS{__add,[16]i16}; 'avx2_addSA_i16' = arithSA{__add,[16]i16}
|
||||
'avx2_addAA_i32' = arithAA{__add,[ 8]i32}; 'avx2_addAS_i32' = arithAS{__add,[ 8]i32}; 'avx2_addSA_i32' = arithSA{__add,[ 8]i32}
|
||||
'avx2_addAA_f64' = arithAA{__add,[ 4]f64}; 'avx2_addAS_f64' = arithAS{__add,[ 4]f64}; 'avx2_addSA_f64' = arithSA{__add,[ 4]f64}
|
||||
'avx2_subAA_i8' = arithAA{__sub,[32]i8 }; 'avx2_subAS_i8' = arithAS{__sub,[32]i8 }; 'avx2_subSA_i8' = arithSA{__sub,[32]i8 }
|
||||
'avx2_subAA_i16' = arithAA{__sub,[16]i16}; 'avx2_subAS_i16' = arithAS{__sub,[16]i16}; 'avx2_subSA_i16' = arithSA{__sub,[16]i16}
|
||||
'avx2_subAA_i32' = arithAA{__sub,[ 8]i32}; 'avx2_subAS_i32' = arithAS{__sub,[ 8]i32}; 'avx2_subSA_i32' = arithSA{__sub,[ 8]i32}
|
||||
'avx2_subAA_f64' = arithAA{__sub,[ 4]f64}; 'avx2_subAS_f64' = arithAS{__sub,[ 4]f64}; 'avx2_subSA_f64' = arithSA{__sub,[ 4]f64}
|
||||
'avx2_mulAA_i8' = arithAA{__mul,[32]i8 }; 'avx2_mulAS_i8' = arithAS{__mul,[32]i8 }; 'avx2_mulSA_i8' = arithSA{__mul,[32]i8 }
|
||||
'avx2_mulAA_i16' = arithAA{__mul,[16]i16}; 'avx2_mulAS_i16' = arithAS{__mul,[16]i16}; 'avx2_mulSA_i16' = arithSA{__mul,[16]i16}
|
||||
'avx2_mulAA_i32' = arithAA{__mul,[ 8]i32}; 'avx2_mulAS_i32' = arithAS{__mul,[ 8]i32}; 'avx2_mulSA_i32' = arithSA{__mul,[ 8]i32}
|
||||
'avx2_mulAA_f64' = arithAA{__mul,[ 4]f64}; 'avx2_mulAS_f64' = arithAS{__mul,[ 4]f64}; 'avx2_mulSA_f64' = arithSA{__mul,[ 4]f64}
|
||||
@@ -5,7 +5,10 @@ def NaN = 0.0/0.0
 def isNaN{x:f64} = x!=x
 def qNaN{x:u64} = (x<<1) == (cast{u64, 0x8ff8} << 49)
 
-def ftrunc_i32{x:f64} = emit{i32, '', x} # maybe explicitly use _mm_cvtsd_si32
-def ftrunc_i64{x:f64} = emit{i64, '', x}
+def ftrunc{T, x:f64 & i8==T} = emit{i8, '', x}
+def ftrunc{T, x:f64 & i16==T} = emit{i16, '', x}
+def ftrunc{T, x:f64 & i32==T} = emit{i32, '', x} # maybe explicitly use _mm_cvtsd_si32?
+def ftrunc{T, x:f64 & i64==T} = emit{i64, '', x}
+def fext{x} = emit{f64, '', x}
 
 def interp_f64{x:u64} = emit{f64, 'interp_f64', x}