diff --git a/makefile b/makefile
index cfae1da0..3be4e44f 100644
--- a/makefile
+++ b/makefile
@@ -216,12 +216,27 @@ preSingeliBin:
 	@${MAKE} i_singeli=0 singeli=0 force_build_dir=obj/presingeli f= lf= postmsg="singeli sources:" i_t=presingeli i_f='-O1 -DPRE_SINGELI' FFI=0 OUTPUT=obj/presingeli/BQN c
 
-build_singeli: ${addprefix src/singeli/gen/, cmp.c dyarith.c copy.c equal.c squeeze.c scan.c slash.c}
+build_singeli: ${addprefix src/singeli/gen/, cmp.c dyarith2.c copy.c equal.c squeeze.c scan.c slash.c}
 	@echo $(postmsg)
 
 src/singeli/gen/%.c: src/singeli/src/%.singeli preSingeliBin
 	@echo $< | cut -c 17- | sed 's/^/  /'
 	@obj/presingeli/BQN SingeliMake.bqn "$$(if [ -d Singeli ]; then echo Singeli; else echo SingeliClone; fi)" $< $@ "obj/presingeli/"
 
+ifeq (${i_singeli}, 1)
+# arithmetic singeli generator
+src/builtins/arithd2.c: src/singeli/c/dyarith2.c
+src/singeli/c/dyarith2.c: src/singeli/gen/dyarithTables.c
+src/singeli/src/dyarith2.singeli: src/singeli/gen/dyarithDefs.singeli
+
+src/singeli/gen/dyarithDefs.singeli: genArithTables
+src/singeli/gen/dyarithTables.c: genArithTables
+
+.INTERMEDIATE: genArithTables
+genArithTables: src/singeli/src/genArithTables.bqn preSingeliBin
+	@echo "  generating dyarithDefs.singeli & dyarithTables.c"
+	@obj/presingeli/BQN src/singeli/src/genArithTables.bqn "$$PWD/src/singeli/gen/dyarithDefs.singeli" "$$PWD/src/singeli/gen/dyarithTables.c"
+endif
+
 # dependency files
diff --git a/src/builtins/arithd.c b/src/builtins/arithd.c
index de808f15..314d9ce9 100644
--- a/src/builtins/arithd.c
+++ b/src/builtins/arithd.c
@@ -2,14 +2,18 @@
 #include "../utils/each.h"
 #include <math.h>
 
+static f64 pfmod(f64 a, f64 b) {
+  f64 r = fmod(a, b);
+  if (a<0 != b<0 && r!=0) r+= b;
+  return r;
+}
+
 #if SINGELI
+  #define BCALL(N, X) N(b(X))
   #define interp_f64(X) b(X).f
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#include "../singeli/gen/dyarith.c"
-#pragma GCC diagnostic pop
+  #include "../singeli/c/arithd2Impl.c"
 #endif
 
 #define P2(N) { if(isArr(w)|isArr(x)) { \
@@ -158,39 +162,40 @@ NOINLINE B NAME##_c2_arr(B t, B w, B x) { \
     EXTRA2 \
     if (isArr(w)|isArr(x)) { \
-    if (isArr(w)&isArr(x) && RNK(w)==RNK(x)) { \
-      if (!eqShPart(SH(w), SH(x), RNK(w))) thrF(SYMB ": Expected equal shape prefix (%H ≡ ≢𝕨, %H ≡ ≢𝕩)", w, x); \
-      usz ia = IA(x); \
-      u8 we = TI(w,elType); \
-      u8 xe = TI(x,elType); \
-      if ((we==el_bit | xe==el_bit) && (we|xe)<=el_f64) { \
-        if (BIT && (we|xe)==0) return bitAA##BIT(w,x,ia); \
-        B wt=w,xt=x; \
-        we=xe=iMakeEq(&wt, &xt, we, xe); \
-        w=wt; x=xt; \
-      } \
-      if ((we==el_i32|we==el_f64)&(xe==el_i32|xe==el_f64)) { \
-        bool wei = we==el_i32; bool xei = xe==el_i32; \
-        if (wei&xei) { PI32(w)PI32(x)SI_AA(NAME,i32,rcf64) DOI32(EXPR,w,wp[i],xp[i],rcf64) } \
-        if (!wei&!xei) {PF(w)PF(x) { SI_AA(NAME,f64,base) } Rf64(x) DOF(EXPR,w,wp[i],xp[i]) decG(w);decG(x);return r; } \
-        rcf64:; Rf64(x) \
-        if (wei) { PI32(w) \
-          if (xei) { PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
-          else     { PF (x) DOF(EXPR,w,wp[i],xp[i]) } \
-        } else {PF(w)PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
-        decG(w); decG(x); return num_squeeze(r); \
-      } \
-      if(we==el_i8  & xe==el_i8 ) { PI8 (w) PI8 (x) SI_AA(NAME, i8,base) DOI8 (EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i16 & xe==el_i16) { PI16(w) PI16(x) SI_AA(NAME,i16,base) DOI16(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i8  & xe==el_i32) { PI8 (w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i32 & xe==el_i8 ) { PI32(w) PI8 (x) DOI32(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i16 & xe==el_i32) { PI16(w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i32 & xe==el_i16) { PI32(w) PI16(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i16 & xe==el_i8 ) { PI16(w) PI8 (x) DOI16(EXPR,w,wp[i],xp[i],base) } \
-      if(we==el_i8  & xe==el_i16) { PI8 (w) PI16(x) DOI16(EXPR,w,wp[i],xp[i],base) } \
-    } \
-    else if (isF64(w)&isArr(x)) { usz ia=IA(x); u8 xe=TI(x,elType); DO_SA(NAME,EXPR) } \
-    else if (isF64(x)&isArr(w)) { usz ia=IA(w); u8 we=TI(w,elType); DO_AS(NAME,EXPR) } \
+    if (isArr(w)&isArr(x)) { SI_AA(NAME) \
+      if (RNK(w)==RNK(x)) { \
+        if (!eqShPart(SH(w), SH(x), RNK(w))) thrF(SYMB ": Expected equal shape prefix (%H ≡ ≢𝕨, %H ≡ ≢𝕩)", w, x); \
+        usz ia = IA(x); \
+        u8 we = TI(w,elType); \
+        u8 xe = TI(x,elType); \
+        if ((we==el_bit | xe==el_bit) && (we|xe)<=el_f64) { \
+          if (BIT && (we|xe)==0) return bitAA##BIT(w,x,ia); \
+          B wt=w,xt=x; \
+          we=xe=iMakeEq(&wt, &xt, we, xe); \
+          w=wt; x=xt; \
+        } \
+        if ((we==el_i32|we==el_f64)&(xe==el_i32|xe==el_f64)) { \
+          bool wei = we==el_i32; bool xei = xe==el_i32; \
+          if (wei&xei) { PI32(w)PI32(x)DOI32(EXPR,w,wp[i],xp[i],rcf64) } \
+          if (!wei&!xei) {PF(w)PF(x) { } Rf64(x) DOF(EXPR,w,wp[i],xp[i]) decG(w);decG(x);return r; } \
+          rcf64:; Rf64(x) \
+          if (wei) { PI32(w) \
+            if (xei) { PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
+            else     { PF (x) DOF(EXPR,w,wp[i],xp[i]) } \
+          } else {PF(w)PI32(x) DOF(EXPR,w,wp[i],xp[i]) } \
+          decG(w); decG(x); return num_squeeze(r); \
+        } \
+        if(we==el_i8  & xe==el_i8 ) { PI8 (w) PI8 (x) DOI8 (EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i16 & xe==el_i16) { PI16(w) PI16(x) DOI16(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i8  & xe==el_i32) { PI8 (w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i32 & xe==el_i8 ) { PI32(w) PI8 (x) DOI32(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i16 & xe==el_i32) { PI16(w) PI32(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i32 & xe==el_i16) { PI32(w) PI16(x) DOI32(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i16 & xe==el_i8 ) { PI16(w) PI8 (x) DOI16(EXPR,w,wp[i],xp[i],base) } \
+        if(we==el_i8  & xe==el_i16) { PI8 (w) PI16(x) DOI16(EXPR,w,wp[i],xp[i],base) } \
+      } \
+    } else if (isF64(w)&isArr(x)) { usz ia=IA(x); u8 xe=TI(x,elType); DO_SA(NAME,EXPR) } \
+    else if (isF64(x)&isArr(w)) { usz ia=IA(w); u8 we=TI(w,elType); DO_AS(NAME,EXPR) } \
     base: P2(NAME) \
   } \
   thrM(SYMB ": Unexpected argument types"); \
@@ -210,13 +215,6 @@
 }
 #endif // TYPED_ARITH
 
-static f64 pfmod(f64 a, f64 b) {
-  f64 r = fmod(a, b);
-  if (a<0 != b<0 && r!=0) r+= b;
-  return r;
-}
-
-#define NO_SI_AA(N,S,BASE)
 #define REG_SA(NAME, EXPR) \
   if (xe==el_bit) return bit_sel1Fn(NAME##_c2,w,x,1); \
   if (xe==el_i8  && q_i8 (w)) { PI8 (x) i8  wc=o2iu(w); DOI8 (EXPR,x,wc,xp[i],sa8B ) } sa8B :; \
@@ -230,8 +228,9 @@ static f64 pfmod(f64 a, f64 b) {
   if (we==el_i32 && q_i32(x)) { PI32(w) i32 xc=o2iu(x); DOI32(EXPR,w,wp[i],xc,as32B) } as32B:; \
   if (we==el_f64) { Rf64(w) PF(w) DOF(EXPR,x,wp[i],x.f) decG(w); return num_squeeze(r); }
 
+
+#define NO_SI_AA(N)
 #if SINGELI
-  #define SI_AA(N,S,BASE) R##S(x); usz rlen=avx2_##N##AA##_##S((void*)wp, (void*)xp, (void*)rp, ia); if(RARE(rlen!=ia)) { decG(r); goto BASE; } decG(w);decG(x);return r;
   #define SI_SA_I(N,S,W,BASE) R##S(x); usz rlen=avx2_##N##SA##_##S((W).u, (void*)xp, (void*)rp, ia); if(RARE(rlen!=ia)) { decG(r); goto BASE; } dec (w);decG(x);return r;
   #define SI_AS_I(N,S,X,BASE) R##S(w); usz rlen=avx2_##N##AS##_##S((void*)wp, (X).u, (void*)rp, ia); if(RARE(rlen!=ia)) { decG(r); goto BASE; } decG(w);dec (x);return r;
   #define SI_SA(NAME, EXPR) \
@@ -254,6 +253,8 @@ static f64 pfmod(f64 a, f64 b) {
       case el_f64: { SI_AS_I(NAME,f64,x,asBad) } \
       case el_c8: case el_c16: case el_c32: case el_B:; /*fallthrough*/ \
     } asBad:;
+  #define SI_AA(N) return do_dyArith(&N##DyTable, w, x);
+  // #define SI_AA NO_SI_AA
 #else
   #define SI_AA NO_SI_AA
   #define SI_AS REG_AS
@@ -313,10 +314,10 @@
 GC2i("¬", not, 1+wv-xv, { if (isC32(w) & isC32(x)) return m_f64(1 + (i32)(u32)w.u - (i32)(u32)x.u); }, {}, 0, NO_SI_AA, REG_AS, REG_SA)
 GC2i("×", mul, wv*xv, {}, {}, 2, SI_AA, SI_AS, SI_SA)
-GC2i("∧", and, wv*xv, {}, {}, 2, NO_SI_AA, REG_AS, REG_SA)
-GC2i("∨", or , (wv+xv)-(wv*xv), {}, {}, 1, NO_SI_AA, REG_AS, REG_SA)
-GC2i("⌊", floor, wv>xv?xv:wv, {}, {}, 2, NO_SI_AA, REG_AS, REG_SA) // optimizer optimizes out the fallback mess
-GC2i("⌈", ceil , wv>xv?wv:xv, {}, {}, 1, NO_SI_AA, REG_AS, REG_SA)
+GC2i("∧", and, wv*xv, {}, {}, 2, SI_AA, REG_AS, REG_SA)
+GC2i("∨", or , (wv+xv)-(wv*xv), {}, {}, 1, SI_AA, REG_AS, REG_SA)
+GC2i("⌊", floor, wv>xv?xv:wv, {}, {}, 2, SI_AA, REG_AS, REG_SA) // optimizer optimizes out the fallback mess
+GC2i("⌈", ceil , wv>xv?wv:xv, {}, {}, 1, SI_AA, REG_AS, REG_SA)
 
 GC2f("÷", div , w.f/x.f, {})
 GC2f("⋆", pow , pow(w.f, x.f), {})
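
Note on the arithd.c hunks above: `pfmod` moves to the top of the file because the kernels pulled in via arithd2Impl.c (see `stileAAu_f64_f64_f64` in the new file below) now call it as well, and `SI_AA` shrinks from a per-type kernel invocation into a single whole-call dispatch into `do_dyArith`.

For reference, a standalone sketch of what `pfmod` computes, not part of the patch: BQN's `|` is floored modulus, so the remainder takes the divisor's sign, while C's `fmod` truncates toward zero.

```c
#include <math.h>
#include <stdio.h>

static double pfmod(double a, double b) {
  double r = fmod(a, b);                 // truncated remainder, sign of a
  if ((a<0) != (b<0) && r != 0) r += b;  // shift into the divisor's sign range
  return r;
}

int main(void) {
  printf("%g %g\n", fmod(-5, 3), pfmod(-5, 3)); // -2  1
  printf("%g %g\n", fmod( 5,-3), pfmod( 5,-3)); //  2 -1
  printf("%g %g\n", fmod(-5,-3), pfmod(-5,-3)); // -2 -2
}
```
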
diff --git a/src/builtins/internal.c b/src/builtins/internal.c
index 00703e82..6a3e5593 100644
--- a/src/builtins/internal.c
+++ b/src/builtins/internal.c
@@ -285,7 +285,9 @@ B internalTemp_c1(B t, B x) {
   #endif
   return x;
 }
+#if !SINGELI
 B internalTemp_c2(B t, B w, B x) { dec(w); return x; }
+#endif
 
 B heapDump_c1(B t, B x) {
   cbqn_heapDump();
diff --git a/src/singeli/c/arithd2Impl.c b/src/singeli/c/arithd2Impl.c
new file mode 100644
index 00000000..4bff6b73
--- /dev/null
+++ b/src/singeli/c/arithd2Impl.c
@@ -0,0 +1,181 @@
+#include "../../core.h"
+#include "../../utils/each.h"
+#include <math.h>
+// #define ARITH_DEBUG 1
+
+typedef u64 (*CheckedFn)(u8* r, u8* w, u8* x, u64 len);
+typedef void (*UncheckedFn)(u8* r, u8* w, u8* x, u64 len);
+#define FOR_ExecAA(F) \
+  F(fail) /* first to allow zero-initialization to be fail implicitly */ \
+  F(swap) /* swap 𝕨 and 𝕩, then run ex2 */ \
+  /* cast the specified argument up to the specified size, then either swap or don't, then run ex2 */ \
+  F(wi8_reg)  F(xi8_reg)  F(wi8_swap)  F(xi8_swap)  \
+  F(wi16_reg) F(xi16_reg) F(wi16_swap) F(xi16_swap) \
+  F(wi32_reg) F(xi32_reg) F(wi32_swap) F(xi32_swap) \
+  F(wf64_reg) F(xf64_reg) F(wf64_swap) F(xf64_swap) \
+  F(wc16_reg) F(xc16_reg) F(wc16_swap) F(xc16_swap) \
+  F(wc32_reg) F(xc32_reg) F(wc32_swap) F(xc32_swap) \
+  /* c_* - overflow-checked; u_* - no overflow check */ \
+  F(c_call_rbyte) /* arguments are already the wanted widths; result isn't a bitarr */ \
+  F(u_call_rbyte) /* ↑ */ \
+  F(e_call_rbyte) /* calls CheckedFn but errors on non-zero result */ \
+  F(u_call_bit) /* result and arguments are bitarrs */ \
+  F(u_call_wxf64sq) /* convert both args up to f64 if needed, make f64arr, and squeeze result; i.e. lazy float fallback case */ \
+  F(c_call_wxi8) /* convert both args (which need to be bitarrs) to i8arrs, and invoke checked function (no good reason for it to fail the check, but this allows reusing a c‿i8‿i8 impl) */ \
+  F(e_call_sqx) /* squeeze f64arr 𝕩, error if can't; else re-dispatch to new entry */
+enum ExecAA {
+  #define F(X) X,
+  FOR_ExecAA(F)
+  #undef F
+};
+
+#if ARITH_DEBUG
+char* execAA_repr(u8 ex) {
+  switch(ex) { default: return "(unknown)";
+    #define F(X) case X: return #X;
+    FOR_ExecAA(F)
+    #undef F
+  }
+}
+#endif
+
+
+typedef struct FnInfo {
+  union { CheckedFn cFn; UncheckedFn uFn; };
+  u8 ex1, ex2; // ExecAA
+  u8 type; // t_*; unused for u_call_bit
+  u8 width; // width in bytes; unused for u_call_bit
+} FnInfo;
+typedef struct EntAA {
+  FnInfo a, b;
+} EntAA;
+
+typedef struct DyTable {
+  EntAA entsAA[8*8]; // one for each instruction
+  BBB2B mainFn;
+  char* repr;
+} DyTable;
+
+NOINLINE B do_dyArith(DyTable* table, B w, B x) {
+  B r;
+
+  if (1 || isArr(w)) {
+    u8 we = TI(w, elType);
+    if (we==el_B) goto rec;
+    if (1 || isArr(x)) {
+      u8 xe = TI(x, elType);
+      if (xe==el_B) goto rec;
+      ur wr = RNK(w);
+      ur xr = RNK(x);
+      if (wr!=xr || !eqShPart(SH(w), SH(x), wr)) goto rec;
+
+      usz ia = IA(w);
+      EntAA* e = &table->entsAA[we*8 + xe];
+      newEnt:
+
+      FnInfo* fn = &e->a;
+      newFn:
+      u8 ex = fn->ex1;
+      newEx:
+      B t;
+      #if ARITH_DEBUG
+        printf("opcode %d / %s\n", ex, execAA_repr(ex));
+      #endif
+      switch(ex) { default: UD;
+        case wi8_reg:   w=taga( cpyI8Arr(w)); goto do_ex2;  case xi8_reg:   x=taga( cpyI8Arr(x)); goto do_ex2;
+        case wi16_reg:  w=taga(cpyI16Arr(w)); goto do_ex2;  case xi16_reg:  x=taga(cpyI16Arr(x)); goto do_ex2;
+        case wi32_reg:  w=taga(cpyI32Arr(w)); goto do_ex2;  case xi32_reg:  x=taga(cpyI32Arr(x)); goto do_ex2;
+        case wf64_reg:  w=taga(cpyF64Arr(w)); goto do_ex2;  case xf64_reg:  x=taga(cpyF64Arr(x)); goto do_ex2;
+        case wc16_reg:  w=taga(cpyC16Arr(w)); goto do_ex2;  case xc16_reg:  x=taga(cpyC16Arr(x)); goto do_ex2;
+        case wc32_reg:  w=taga(cpyC32Arr(w)); goto do_ex2;  case xc32_reg:  x=taga(cpyC32Arr(x)); goto do_ex2;
+        case wi8_swap:  t=x; x=taga( cpyI8Arr(w)); w=t; goto do_ex2;  case xi8_swap:  t=w; w=taga( cpyI8Arr(x)); x=t; goto do_ex2;
+        case wi16_swap: t=x; x=taga(cpyI16Arr(w)); w=t; goto do_ex2;  case xi16_swap: t=w; w=taga(cpyI16Arr(x)); x=t; goto do_ex2;
+        case wi32_swap: t=x; x=taga(cpyI32Arr(w)); w=t; goto do_ex2;  case xi32_swap: t=w; w=taga(cpyI32Arr(x)); x=t; goto do_ex2;
+        case wf64_swap: t=x; x=taga(cpyF64Arr(w)); w=t; goto do_ex2;  case xf64_swap: t=w; w=taga(cpyF64Arr(x)); x=t; goto do_ex2;
+        case wc16_swap: t=x; x=taga(cpyC16Arr(w)); w=t; goto do_ex2;  case xc16_swap: t=w; w=taga(cpyC16Arr(x)); x=t; goto do_ex2;
+        case wc32_swap: t=x; x=taga(cpyC32Arr(w)); w=t; goto do_ex2;  case xc32_swap: t=w; w=taga(cpyC32Arr(x)); x=t; goto do_ex2;
+        case swap: t=w; w=x; x=t; goto do_ex2;
+        do_ex2: ex = fn->ex2; goto newEx;
+
+        case c_call_rbyte: { c_call_rbyte:
+          u64 got = fn->cFn(m_tyarrlc(&r, fn->width, x, fn->type), tyany_ptr(w), tyany_ptr(x), ia);
+          if (got==ia) goto decG_ret;
+          decG(r);
+          fn = &e->b;
+          goto newFn;
+        }
+        case u_call_rbyte: {
+          fn->uFn(m_tyarrlc(&r, fn->width, x, fn->type), tyany_ptr(w), tyany_ptr(x), ia);
+          goto decG_ret;
+        }
+        case e_call_rbyte: {
+          u64 got = fn->cFn(m_tyarrlc(&r, fn->width, x, fn->type), tyany_ptr(w), tyany_ptr(x), ia);
+          if (got) goto rec;
+          goto decG_ret;
+        }
+        case u_call_bit: {
+          u64* rp; r = m_bitarrc(&rp, x);
+          fn->uFn((u8*)rp, tyany_ptr(w), tyany_ptr(x), ia);
+          goto decG_ret;
+        }
+
+        case u_call_wxf64sq: {
+          f64* rp; r = m_f64arrc(&rp, x);
+          fn->uFn((u8*)rp, tyany_ptr(w = toF64Any(w)), tyany_ptr(x = toF64Any(x)), ia);
+          r = num_squeeze(r);
+          goto decG_ret;
+        }
+        case c_call_wxi8: {
+          assert(TI(x,elType)==el_bit && TI(w,elType)==el_bit);
+          w = taga(cpyI8Arr(w));
+          x = taga(cpyI8Arr(x));
+          goto c_call_rbyte;
+        }
+        case e_call_sqx: {
+          assert(TI(x,elType)==el_f64);
+          x = num_squeeze(x);
+          u8 xe = TI(x,elType);
+          if (xe==el_f64) goto rec;
+          e = &table->entsAA[TI(w,elType)*8 + xe];
+          goto newEnt;
+        }
+        case fail: goto rec;
+      }
+    } else {
+      goto rec;
+    }
+  } else {
+    if (isArr(x)) {
+      u8 xe = TI(x, elType);
+      if (xe==el_B) goto rec;
+      goto rec;
+    } else { // TODO decide if this is even a case that needs to be handled here
+      return table->mainFn(w, w, x);
+    }
+  }
+
+  rec:
+  return arith_recd(table->mainFn, w, x);
+  decG_ret:
+  decG(w); decG(x);
+  return r;
+}
+
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include "../gen/dyarith2.c"
+#pragma GCC diagnostic pop
+
+static void rootAAu_f64_f64_f64 (u8* r, u8* w, u8* x, u64 len) { for (u64 i = 0; i < len; i++) ((f64*)r)[i] = pow(((f64*)x)[i], 1.0/((f64*)w)[i]); }
+static void powAAu_f64_f64_f64  (u8* r, u8* w, u8* x, u64 len) { for (u64 i = 0; i < len; i++) ((f64*)r)[i] = pow(((f64*)w)[i], ((f64*)x)[i]); }
+static void stileAAu_f64_f64_f64(u8* r, u8* w, u8* x, u64 len) { for (u64 i = 0; i < len; i++) ((f64*)r)[i] = pfmod(((f64*)x)[i], ((f64*)w)[i]); }
+static void logAAu_f64_f64_f64  (u8* r, u8* w, u8* x, u64 len) { for (u64 i = 0; i < len; i++) ((f64*)r)[i] = log(((f64*)x)[i])/log(((f64*)w)[i]); }
+
+#include "../gen/dyarithTables.c"
+
+
+B internalTemp_c2(B t, B w, B x) {
+  return do_dyArith(&addDyTable, w, x);
+}
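
To make the table encoding concrete: `do_dyArith` indexes `entsAA` by `we*8 + xe`, runs `a.ex1` (the cast/swap opcodes rewrite `w`/`x` in place and chain to `ex2`), and for `c_call_rbyte` falls over to the `b` entry when the checked kernel reports fewer than `ia` elements written. The real tables are emitted by genArithTables.bqn into dyarithTables.c; the entry below is a hand-written illustration only, assuming the generator's `<id>AA<mode>_<w>_<x>_<r>` kernel naming, for `+` on two i8 arrays:

```c
// Hypothetical entry, for exposition. addAAc_i8_i8_i8 would be an
// overflow-checked kernel returning the count of elements written;
// addAAu_f64_f64_f64 an unchecked f64 kernel.
extern u64  addAAc_i8_i8_i8    (u8* r, u8* w, u8* x, u64 len);
extern void addAAu_f64_f64_f64 (u8* r, u8* w, u8* x, u64 len);

static EntAA add_i8_i8_ent = {
  // primary plan: operands are already i8, so call the checked kernel directly;
  // ex2 is left zero-initialized (= fail) as it's unused by the *_call_* opcodes
  .a = { .cFn = addAAc_i8_i8_i8,    .ex1 = c_call_rbyte,   .type = t_i8arr, .width = 1 },
  // fallback plan on overflow: widen both sides to f64, run unchecked, squeeze
  .b = { .uFn = addAAu_f64_f64_f64, .ex1 = u_call_wxf64sq },
};
```

(`internalTemp_c2` above is a temporary test hook that routes its arguments straight through `addDyTable`.)
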
diff --git a/src/singeli/src/dyarith.singeli b/src/singeli/src/dyarith.singeli
index cb018dcc..4b115114 100644
--- a/src/singeli/src/dyarith.singeli
+++ b/src/singeli/src/dyarith.singeli
@@ -112,15 +112,15 @@ arithAA{F,VT}(w: *u8, x: *u8, r: *u8, len: Size) : Size = { def c{x} = *eltype{V
 arithAS{F,VT}(w: *u8, x: u64, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithAS{VT, F, *T ~~ w, cast_fB{T, x}, *T~~r, len} }
 arithSA{F,VT}(w: u64, x: *u8, r: *u8, len: Size) : Size = { def T=eltype{VT}; arithSA{VT, F, cast_fB{T, w}, *T ~~ x, *T~~r, len} }
 
-'avx2_addAA_i8'  = arithAA{__add,[32]i8 }; 'avx2_addAS_i8'  = arithAS{__add,[32]i8 }; 'avx2_addSA_i8'  = arithSA{__add,[32]i8 }
-'avx2_addAA_i16' = arithAA{__add,[16]i16}; 'avx2_addAS_i16' = arithAS{__add,[16]i16}; 'avx2_addSA_i16' = arithSA{__add,[16]i16}
-'avx2_addAA_i32' = arithAA{__add,[ 8]i32}; 'avx2_addAS_i32' = arithAS{__add,[ 8]i32}; 'avx2_addSA_i32' = arithSA{__add,[ 8]i32}
-'avx2_addAA_f64' = arithAA{__add,[ 4]f64}; 'avx2_addAS_f64' = arithAS{__add,[ 4]f64}; 'avx2_addSA_f64' = arithSA{__add,[ 4]f64}
-'avx2_subAA_i8'  = arithAA{__sub,[32]i8 }; 'avx2_subAS_i8'  = arithAS{__sub,[32]i8 }; 'avx2_subSA_i8'  = arithSA{__sub,[32]i8 }
-'avx2_subAA_i16' = arithAA{__sub,[16]i16}; 'avx2_subAS_i16' = arithAS{__sub,[16]i16}; 'avx2_subSA_i16' = arithSA{__sub,[16]i16}
-'avx2_subAA_i32' = arithAA{__sub,[ 8]i32}; 'avx2_subAS_i32' = arithAS{__sub,[ 8]i32}; 'avx2_subSA_i32' = arithSA{__sub,[ 8]i32}
-'avx2_subAA_f64' = arithAA{__sub,[ 4]f64}; 'avx2_subAS_f64' = arithAS{__sub,[ 4]f64}; 'avx2_subSA_f64' = arithSA{__sub,[ 4]f64}
-'avx2_mulAA_i8'  = arithAA{__mul,[32]i8 }; 'avx2_mulAS_i8'  = arithAS{__mul,[32]i8 }; 'avx2_mulSA_i8'  = arithSA{__mul,[32]i8 }
-'avx2_mulAA_i16' = arithAA{__mul,[16]i16}; 'avx2_mulAS_i16' = arithAS{__mul,[16]i16}; 'avx2_mulSA_i16' = arithSA{__mul,[16]i16}
-'avx2_mulAA_i32' = arithAA{__mul,[ 8]i32}; 'avx2_mulAS_i32' = arithAS{__mul,[ 8]i32}; 'avx2_mulSA_i32' = arithSA{__mul,[ 8]i32}
-'avx2_mulAA_f64' = arithAA{__mul,[ 4]f64}; 'avx2_mulAS_f64' = arithAS{__mul,[ 4]f64}; 'avx2_mulSA_f64' = arithSA{__mul,[ 4]f64}
+'avx2_addAS_i8'  = arithAS{__add,[32]i8 }; 'avx2_addSA_i8'  = arithSA{__add,[32]i8 }
+'avx2_addAS_i16' = arithAS{__add,[16]i16}; 'avx2_addSA_i16' = arithSA{__add,[16]i16}
+'avx2_addAS_i32' = arithAS{__add,[ 8]i32}; 'avx2_addSA_i32' = arithSA{__add,[ 8]i32}
+'avx2_addAS_f64' = arithAS{__add,[ 4]f64}; 'avx2_addSA_f64' = arithSA{__add,[ 4]f64}
+'avx2_subAS_i8'  = arithAS{__sub,[32]i8 }; 'avx2_subSA_i8'  = arithSA{__sub,[32]i8 }
+'avx2_subAS_i16' = arithAS{__sub,[16]i16}; 'avx2_subSA_i16' = arithSA{__sub,[16]i16}
+'avx2_subAS_i32' = arithAS{__sub,[ 8]i32}; 'avx2_subSA_i32' = arithSA{__sub,[ 8]i32}
+'avx2_subAS_f64' = arithAS{__sub,[ 4]f64}; 'avx2_subSA_f64' = arithSA{__sub,[ 4]f64}
+'avx2_mulAS_i8'  = arithAS{__mul,[32]i8 }; 'avx2_mulSA_i8'  = arithSA{__mul,[32]i8 }
+'avx2_mulAS_i16' = arithAS{__mul,[16]i16}; 'avx2_mulSA_i16' = arithSA{__mul,[16]i16}
+'avx2_mulAS_i32' = arithAS{__mul,[ 8]i32}; 'avx2_mulSA_i32' = arithSA{__mul,[ 8]i32}
+'avx2_mulAS_f64' = arithAS{__mul,[ 4]f64}; 'avx2_mulSA_f64' = arithSA{__mul,[ 4]f64}
\ No newline at end of file
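
With the `avx2_*AA_*` exports gone, the only remaining callers of dyarith.singeli's kernels are the scalar-array paths (`SI_AS`/`SI_SA`). The array-array case now goes through the table dispatch; illustratively (the `mulDyTable` instance itself is expected to come from the generated dyarithTables.c):

```c
// Expansion of the new SI_AA at the × (mul) call site:
//   if (isArr(w)&isArr(x)) { SI_AA(mul) ... }
// becomes
//   if (isArr(w)&isArr(x)) { return do_dyArith(&mulDyTable, w, x); ... }
```
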
diff --git a/src/singeli/src/dyarith2.singeli b/src/singeli/src/dyarith2.singeli
new file mode 100644
index 00000000..bc5e9c2c
--- /dev/null
+++ b/src/singeli/src/dyarith2.singeli
@@ -0,0 +1,134 @@
+# include './base'
+# include './f64'
+# include './cbqnDefs'
+# include './sse3'
+# include './avx'
+# include './avx2'
+# include './bitops'
+# include './mask'
+include './dyarith'
+
+
+def rootty{T & isprim{T}} = T
+def rootty{T & isvec{T}} = eltype{T}
+
+def ty_sc{O, R} = R # keep floats as-is
+def ty_sc{O, R & issigned{O} & isunsigned{rootty{R}}} = ty_s{R}
+def ty_sc{O, R & isunsigned{O} & issigned{rootty{R}}} = ty_u{R}
+
+def bqn_or{a, b} = (a+b)-(a*b)
+
+def fmt_op{X== __add}= '__add'
+def fmt_op{X== __sub}= '__sub'
+def fmt_op{X== __mul}= '__mul'
+def fmt_op{X== __div}= '__div'
+def fmt_op{X==  __or}= '__or'
+def fmt_op{X== __and}= '__and'
+def fmt_op{X==bqn_or}= 'bqn_or'
+def fmt_op{X==   min}= 'min'
+def fmt_op{X==   max}= 'max'
+
+def arithChk1{F==__add, M, w:T, x:T, r:T & issigned{rootty{T}}} = anyneg{M{(w^r) & (x^r)}}
+def arithChk1{F==__sub, M, w:T, x:T, r:T & issigned{rootty{T}}} = anyneg{M{(w^x) & (w^r)}}
+def arithChk1{F==__add, M, w:T, x:T, r:T & isvec{T} & width{eltype{T}}<=16} = anyne{__adds{w,x}, r, M}
+def arithChk1{F==__sub, M, w:T, x:T, r:T & isvec{T} & width{eltype{T}}<=16} = anyne{__subs{w,x}, r, M}
+
+def arithChk2{F, M, w:T, x:T & (match{F,__add} | match{F,__sub})} = {
+  r:= F{w,x}
+  tup{r, arithChk1{F, M, w, x, r}}
+}
+def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i16==eltype{T}} = {
+  rl:= __mul  {w,x}
+  rh:= __mulhi{w,x}
+  tup{rl, anyne{rh, rl>>15, M}}
+}
+def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
+  def wp = unpackQ{w, T ~~ (broadcast{T,0}>w)}
+  def xp = unpackQ{x, T ~~ (broadcast{T,0}>x)}
+  def rp = each{__mul, wp, xp}
+  def bad = each{{v} => [16]i16 ~~ ((v<<8)>>8 != v), rp}
+  if (M{0}) { # masked check
+    tup{packQ{rp}, any{M{packQ{bad}}}}
+  } else { # unmasked check; can do check in a simpler way
+    tup{packQ{rp}, any{tupsel{0,bad}|tupsel{1,bad}}}
+  }
+}
+def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
+  max:= [8]f32 ~~ broadcast{[8]u32, 0x4efffffe}
+  def cf32{x} = emit{[8]f32, '_mm256_cvtepi32_ps', x}
+  f32mul:= cf32{w} * cf32{x}
+  tup{w*x, any{M{abs{f32mul} >= max}}}
+  # TODO fallback to the below if the above fails
+  # TODO don't do this, but instead shuffle one half, do math, unshuffle that half
+  # def wp = unpackQ{w, broadcast{T, 0}}
+  # def xp = unpackQ{x, broadcast{T, 0}}
+  # def rp = each{__mul32, wp, xp}
+  # def T2 = to_el{i64, T}
+  # def bad = each{{v} => {
+  #   (((T2~~v) + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
+  # }, rp}
+  # tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}} this doesn't use M
+}
+
+def runner{u, F} = {
+  def c = ~u
+
+  def run{F, OO, M, w, x} = { show{'todo', fmt_op{F}, c, w, x}; emit{void,'__builtin_abort'}; w }
+
+  def run{F, OO, M, w:T, x:T & c} = {
+    def r2 = arithChk2{F, M, w, x}
+    if (rare{tupsel{1,r2}}) OO{}
+    tupsel{0,r2}
+  }
+
+  def run{F, OO, M, w, x & u} = F{w, x} # trivial base implementation
+
+  def run{F, OO, M, w:VU, x:VS & isunsigned{eltype{VU}} & issigned{eltype{VS}}} = { # 'a'+3, 'a'-3
+    top:= broadcast{VU, 1<<(width{eltype{VU}}-1)}
+    top ^ VU~~run{F, OO, M, VS~~(w^top), x}
+  }
+  # def run{F==__add, OO, M, w:VS, x:VU & issigned{eltype{VS}} & isunsigned{eltype{VU}}} = run{F, OO, M, x, w} # 3+'a' → 'a'+3
+  run
+}
+
+def arithAAimpl{vw, mode, F, W, X, R, w, x, r, len} = {
+  # show{fmt_op{F}, mode, W, X, R}
+  if (R==u1) {
+    def bulk = vw/64;
+    def TY = [bulk]u64
+    maskedLoop{bulk, cdiv{len, 64}, {i, M} => {
+      cw:= loadBatch{*u64~~w, i, TY}
+      cx:= loadBatch{*u64~~x, i, TY}
+      storeBatch{*u64~~r, i, F{cw, cx}, M}
+    }}
+  } else {
+    def bulk = vw / max{max{width{W}, width{X}}, width{R}}
+    def overflow = tern{mode==1, {i}=>return{i}, tern{mode==2, {i}=>return{1}, 0}}
+    def TY = [bulk]R
+
+    def run = runner{match{overflow, 0}, F}
+
+    maskedLoop{bulk, len, {i, M} => {
+      cw:= loadBatch{*W~~w, i, ty_sc{W, TY}}
+      cx:= loadBatch{*X~~x, i, ty_sc{X, TY}}
+      storeBatch{*R~~r, i, TY~~run{F, {} => overflow{i}, M, cw, cx}, M}
+    }}
+  }
+}
+
+arithAAc{vw, mode, F, W, X, R}(r:*u8, w:*u8, x:*u8, len:u64) : u64 = {
+  arithAAimpl{vw, mode, F, W, X, R, w, x, r, len}
+  if (mode==1) len
+  else 0
+}
+arithAAu{vw, mode, F, W, X, R}(r:*u8, w:*u8, x:*u8, len:u64) : void = {
+  arithAAimpl{vw, mode, F, W, X, R, w, x, r, len}
+}
+
+def arithAA{mode, F, W, X, R} = {
+  def vw = 256
+  if (mode==1 or mode==2) arithAAc{vw, mode, F, W, X, R}
+  else arithAAu{vw, mode, F, W, X, R}
+}
+
+include './../gen/dyarithDefs'
\ No newline at end of file
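
The overflow checks above are worth spelling out. For `__add`/`__sub` on signed elements, `arithChk1` uses the xor trick: the result overflowed exactly when its sign differs from both inputs' signs, i.e. `(w^r)&(x^r)` is negative. For i16 `__mul`, `arithChk2` keeps both product halves and flags lanes where the high half (`__mulhi`) differs from the sign-extension of the low half (`rl>>15`); the i32 variant instead estimates the product in f32 and flags magnitudes at or above a constant just below 2^31. A scalar C sketch of the first two checks, for exposition only (the patch applies them lane-wise on AVX2 vectors):

```c
#include <stdbool.h>
#include <stdint.h>

// __add: signed overflow occurred iff r's sign differs from both w's and
// x's signs, i.e. both w^r and x^r have their sign bit set.
static bool add_overflows_i32(int32_t w, int32_t x) {
  int32_t r = (int32_t)((uint32_t)w + (uint32_t)x); // wrapping add
  return ((w ^ r) & (x ^ r)) < 0;
}

// i16 __mul: compute the full 32-bit product; it fits in i16 iff the high
// half equals the sign-extension of the low half (rh == rl>>15 lane-wise).
static bool mul_overflows_i16(int16_t w, int16_t x) {
  int32_t full = (int32_t)w * (int32_t)x;
  int16_t rl = (int16_t)full;          // what __mul keeps
  int16_t rh = (int16_t)(full >> 16);  // what __mulhi computes
  return rh != (int16_t)(rl >> 15);    // rl>>15 is 0 or -1: rl's sign
}
```

When a lane fails, `runner` invokes the `OO` handler built in `arithAAimpl`: mode 1 returns the failing batch index, so `arithAAc` returning anything other than `len` signals a partial result, and mode 2 returns 1, matching `e_call_rbyte`'s error-on-nonzero convention in arithd2Impl.c.
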
diff --git a/src/singeli/src/genArithTables.bqn b/src/singeli/src/genArithTables.bqn
new file mode 100644
index 00000000..c428ed6a
--- /dev/null
+++ b/src/singeli/src/genArithTables.bqn
@@ -0,0 +1,197 @@
+debug ← 0=≠•args
+•term.OutRaw⍟debug ∾⟨@+27 ⋄ "[H" ⋄ @+27 ⋄ "[2J" ⋄ @+27 ⋄ "[3J"⟩
+•Out⍟(3×debug) 200⥊'-'
+impls‿tables ← 2↑•args
+
+T ← =⊸{<⎉𝕨⍉⍟𝕨>𝕩}
+
+bit←0
+i8 ←1 ⋄ c8 ←5
+i16←2 ⋄ c16←6
+i32←3 ⋄ c32←7
+f64←4
+u‿c‿e‿rsq‿xsq ← ↕5
+tmNoNext    ← 1‿0‿1‿1‿1
+tmCheckedFn ← 0‿1‿1‿0‿0
+tmLit ← "uceue"
+
+tyWidth ← 99‿0‿1‿2‿3‿0‿1‿2 # in log2(bytes)
+tyName   ← "bit"‿"i8"‿"i16"‿"i32"‿"f64"‿"c8"‿"c16"‿"c32"
+tySiType ← "u1"‿"i8"‿"i16"‿"i32"‿"f64"‿"u8"‿"u16"‿"u32"
+
+ShowTbl ← {
+  # 𝕩 ↩ {∧´⥊𝕩≡¨@? @; 𝕩/˜𝕩≢¨@}¨ 𝕩
+  𝕩 ≍˘⍟=¨ ↩
+  •Show 𝕩
+  #•Out •Repr 𝕩
+}⍟debug
+
+mdNone ← 0
+mdExact ← 1
+mdCast ← 2
+mdBit2i8 ← 3
+mdF64Chr ← 4
+mdSqF64 ← 5
+
+singeliFns ← ⟨⟩
+GetSingeli ← { tm‿w‿x‿r‿id‿si:
+  {
+    r≡@? @;
+    name ← ∾⟨id, "AA", tm⊑tmLit, "_", w⊑tyName, "_", x⊑tyName, "_", r⊑tyName⟩
+    singeliFns∾↩