diff --git a/makefile b/makefile index 4af3918e..c5ea6faa 100644 --- a/makefile +++ b/makefile @@ -93,7 +93,7 @@ ${bd}/%.o: src/%.c @echo $< | cut -c 5- @$(CMD) $@.d -o $@ -c $< -utils: builddir ${addprefix ${bd}/, utf.o hash.o file.o mut.o each.o} +utils: builddir ${addprefix ${bd}/, utf.o hash.o file.o mut.o each.o bits.o} ${bd}/%.o: src/utils/%.c @echo $< | cut -c 5- @$(CMD) $@.d -o $@ -c $< diff --git a/src/builtins/arithd.c b/src/builtins/arithd.c index 3410a2e2..a3dedadb 100644 --- a/src/builtins/arithd.c +++ b/src/builtins/arithd.c @@ -62,6 +62,37 @@ #define RI32(A) i32* rp; B r=m_i32arrc(&rp, A); #define RF(A) f64* rp; B r=m_f64arrc(&rp, A); + static NOINLINE u8 iMakeEq(B* w, B* x, u8 we, u8 xe) { + B s = weia; + + bool b0 = bp[0]&1; + bool both = false; + for (usz i = 0; i < ia; i++) if (bitp_get(bp,i) != b0) { both=true; break; } + + B e0=m_f64(0), e1=m_f64(0); // initialized to have something to decrement later + bool h0=both || b0==0; if (h0) e0 = bitX? f(bi_N, inc(w), m_f64(0)) : f(bi_N, m_f64(0), inc(x)); + bool h1=both || b0==1; if (h1) e1 = bitX? f(bi_N, w, m_f64(1)) : f(bi_N, m_f64(1), x); + // non-bitarr arg has been consumed + B r = bit_sel(b, e0, h0, e1, h1); // and now the bitarr arg is consumed too + dec(e0); dec(e1); + return r; + } + #define DOF(EXPR,A,W,X) { \ for (usz i = 0; i < ia; i++) { \ f64 wv = W; f64 xv = X; \ @@ -92,7 +123,7 @@ } \ dec(w); dec(x); return r; \ } - #define GC2i(SYMB, NAME, EXPR, EXTRA) B NAME##_c2(B t, B w, B x) { \ + #define GC2i(SYMB, NAME, EXPR, EXTRA, BIT, BX) B NAME##_c2(B t, B w, B x) { \ if (isF64(w) & isF64(x)) {f64 wv=w.f,xv=x.f;return m_f64(EXPR);} \ EXTRA \ if (isArr(w)|isArr(x)) { \ @@ -101,6 +132,16 @@ usz ia = a(x)->ia; \ u8 we = TI(w,elType); \ u8 xe = TI(x,elType); \ + if ((we==el_bit | xe==el_bit) && (we|xe)<=el_f64) { \ + if (BIT && (we|xe)==0) { u64* rp; B r = m_bitarrc(&rp, x); \ + u64* wp = bitarr_ptr(w); u64* xp = bitarr_ptr(x); \ + for (usz i=0; iia; u8 xe = TI(x,elType); \ + if (xe==el_bit) { \ + if (BIT && q_fbit(w.f)) { u64* rp; B r=m_bitarrc(&rp,x); \ + u64 wv = bitx(w); u64* xp = bitarr_ptr(x); \ + for (usz i=0; iia; u8 we = TI(w,elType); \ + if (we==el_bit) { \ + if (BIT && q_fbit(x.f)) { u64* rp; B r=m_bitarrc(&rp,w); \ + u64 xv = bitx(x); u64* wp = bitarr_ptr(w); \ + for (usz i=0; iCHR_MAX)thrM("-: Invalid character"); return m_c32((u32)r); } if (isC32(w) & isC32(x)) return m_f64((i32)(u32)w.u - (i32)(u32)x.u); @@ -203,16 +258,16 @@ GC2i("-", sub, wv-xv, { } } } -}) +}, 0, -) GC2i("¬", not, 1+wv-xv, { if (isC32(w) & isF64(x)) { u64 r = (u64)(1+(i32)o2cu(w)-o2i64(x)); if(r>CHR_MAX)thrM("¬: Invalid character"); return m_c32((u32)r); } if (isC32(w) & isC32(x)) return m_f64(1 + (i32)(u32)w.u - (i32)(u32)x.u); -}) -GC2i("×", mul, wv*xv, {}) -GC2i("∧", and, wv*xv, {}) -GC2i("∨", or , (wv+xv)-(wv*xv), {}) -GC2i("⌊", floor, wv>xv?xv:wv, {}) // optimizer optimizes out the fallback mess -GC2i("⌈", ceil , wv>xv?wv:xv, {}) +}, 0, -) +GC2i("×", mul, wv*xv, {}, 1, &) +GC2i("∧", and, wv*xv, {}, 1, &) +GC2i("∨", or , (wv+xv)-(wv*xv), {}, 1, |) +GC2i("⌊", floor, wv>xv?xv:wv, {}, 1, &) // optimizer optimizes out the fallback mess +GC2i("⌈", ceil , wv>xv?wv:xv, {}, 1, |) GC2f("÷", div , w.f/x.f, {}) GC2f("⋆", pow , pow(w.f, x.f), {}) diff --git a/src/builtins/arithm.c b/src/builtins/arithm.c index d868c5d7..46125ceb 100644 --- a/src/builtins/arithm.c +++ b/src/builtins/arithm.c @@ -10,11 +10,11 @@ static inline B arith_recm(BB2B f, B x) { return withFill(r, fx); } -#define GC1i(SYMB, NAME, FEXPR, IBAD, IEXPR) B NAME##_c1(B t, B 
x) { \ +#define GC1i(SYMB,NAME,FEXPR,IBAD,IEXPR,BX) B NAME##_c1(B t, B x) { \ if (isF64(x)) { f64 v = x.f; return m_f64(FEXPR); } \ if (RARE(!isArr(x))) thrM(SYMB ": Expected argument to be a number"); \ u8 xe = TI(x,elType); \ - i64 sz = a(x)->ia; \ + i64 sz = a(x)->ia; BX \ if (xe==el_i8) { i8 MAX=I8_MAX; i8 MIN=I8_MIN; i8* xp=i8any_ptr(x); i8* rp; B r=m_i8arrc(&rp,x); \ for (i64 i = 0; i < sz; i++) { i8 v = xp[i]; if (RARE(IBAD)) { dec(r); goto base; } rp[i] = IEXPR; } \ dec(x); (void)MIN;(void)MAX; return r; \ @@ -38,12 +38,18 @@ static inline B arith_recm(BB2B f, B x) { #define P1(N) { if(isArr(x)) { SLOW1("arithm " #N, x); return arith_recm(N##_c1, x); } } B add_c1(B t, B x) { return x; } -GC1i("-", sub, -v, v== MIN, -v) // change icond to v==-v to support ¯0 (TODO that won't work for i8/i16) -GC1i("¬", not, 1-v, v<=-MAX, 1-v) -GC1i("|", stile, fabs(v), v== MIN, v<0?-v:v) -GC1i("⌊", floor, floor(v), 0, v) -GC1i("⌈", ceil, ceil(v), 0, v) -GC1i("×", mul, v==0?0:v>0?1:-1, 0, v==0?0:v>0?1:-1) +GC1i("-", sub, -v, v== MIN, -v, {}) // change icond to v==-v to support ¯0 (TODO that won't work for i8/i16) +GC1i("|", stile, fabs(v), v== MIN, v<0?-v:v,{}) +GC1i("⌊", floor, floor(v), 0, v, {}) +GC1i("⌈", ceil, ceil(v), 0, v, {}) +GC1i("×", mul, v==0?0:v>0?1:-1, 0, v==0?0:v>0?1:-1,{}) +GC1i("¬", not, 1-v, v<=-MAX, 1-v, { + if(xe==el_bit) { + u64* xp=bitarr_ptr(x); u64* rp; B r=m_bitarrc(&rp,x); + for (u64 i = 0; i < BIT_N(sz); i++) rp[i] = ~xp[i]; + dec(x); return r; + } +}) B div_c1(B t, B x) { if (isF64(x)) return m_f64( 1/x.f ); P1( div); thrM("÷: Getting reciprocal of non-number"); } B pow_c1(B t, B x) { if (isF64(x)) return m_f64( exp(x.f)); P1( pow); thrM("⋆: Getting exp of non-number"); } diff --git a/src/builtins/cmp.c b/src/builtins/cmp.c index 7948217b..0c99682d 100644 --- a/src/builtins/cmp.c +++ b/src/builtins/cmp.c @@ -7,13 +7,14 @@ return arith_recd(N##_c2, w, x); \ }} -#define AL(X) i8* rp; B r = m_i8arrc(&rp, X); +#define AL(X) u64* rp; B r = m_bitarrc(&rp, X); usz ria=a(r)->ia; usz bia = BIT_N(ria); -static NOINLINE u8 makeEq(B* w, B* x, u8 we, u8 xe) { // returns el_MAX if failed +static NOINLINE u8 aMakeEq(B* w, B* x, u8 we, u8 xe) { // returns el_MAX if failed B s = wexe?we:xe; if (elNum(we) & elNum(xe)) { switch(me) { default: UD; + case el_i8: s = taga(cpyI8Arr (s)); break; case el_i16: s = taga(cpyI16Arr(s)); break; case el_i32: s = taga(cpyI32Arr(s)); break; case el_f64: s = taga(cpyF64Arr(s)); break; @@ -28,7 +29,7 @@ static NOINLINE u8 makeEq(B* w, B* x, u8 we, u8 xe) { // returns el_MAX if faile return me; } -#define CMP_IMPL(CHR,OP,FC,CF) \ +#define CMP_IMPL(CHR, OP, FC, CF, BX) \ if (isF64(w)&isF64(x)) return m_i32(w.f OP x.f); \ if (isC32(w)&isC32(x)) return m_i32(w.u OP x.u); \ if (isF64(w)&isC32(x)) return m_i32(FC); \ @@ -39,70 +40,72 @@ static NOINLINE u8 makeEq(B* w, B* x, u8 we, u8 xe) { // returns el_MAX if faile if (xe==el_B) goto end; \ if (rnk(w)==rnk(x)) { if (!eqShape(w, x)) thrF(CHR": Expected equal shape prefix (%H ≡ ≢𝕨, %H ≡ ≢𝕩)", w, x); \ if (we!=xe) { B tw=w,tx=x; \ - we = makeEq(&tw, &tx, we, xe); \ + we = aMakeEq(&tw, &tx, we, xe); \ if (we==el_MAX) goto end; \ w=tw; x=tx; \ } \ - AL(x) usz ria=a(r)->ia; \ + AL(x) \ switch(we) { default: UD; \ - case el_i8 : { i8* wp=i8any_ptr (w); i8* xp=i8any_ptr (x); for(usz i=0;iia; \ + } else { AL(w) \ switch(we) { default: UD; \ - case el_i8: { if (!q_i8 (x)) break; i8 xv=o2iu(x); i8* wp=i8any_ptr (w); for(usz i=0;iia; \ + } else if (isArr(x)) { u8 xe = TI(x,elType); if (xe==el_B) goto end; AL(x) \ switch(xe) 
{ default: UD; \ - case el_i8: { if (!q_i8 (w)) break; i8 wv=o2iu(w); i8* xp=i8any_ptr (x); for(usz i=0;i=, 0, 1) -CMP("<", lt, < , 1, 0) -CMP(">", gt, > , 0, 1) +CMP("≤", le, <=, 1, 0, ~wv | xv) +CMP("≥", ge, >=, 0, 1, wv | ~xv) +CMP("<", lt, < , 1, 0, ~wv & xv) +CMP(">", gt, > , 0, 1, wv & ~xv) #undef CMP B eq_c2(B t, B w, B x) { - CMP_IMPL("=", ==, 0, 0); + CMP_IMPL("=", ==, 0, 0, ~wv^xv); P2(eq); B r = m_i32(atomEqual(w, x)); dec(w); dec(x); return r; } B ne_c2(B t, B w, B x) { - CMP_IMPL("≠", !=, 1, 1); + CMP_IMPL("≠", !=, 1, 1, wv^xv); P2(ne); B r = m_i32(!atomEqual(w, x)); dec(w); dec(x); diff --git a/src/builtins/fns.c b/src/builtins/fns.c index 6bb2283a..58fac5be 100644 --- a/src/builtins/fns.c +++ b/src/builtins/fns.c @@ -247,10 +247,10 @@ B memberOf_c1(B t, B x) { if (rnk(x)!=1) x = toCells(x); usz xia = a(x)->ia; - i8* rp; B r = m_i8arrv(&rp, xia); + u64* rp; B r = m_bitarrv(&rp, xia); H_Sb* set = m_Sb(64); SGetU(x) - for (usz i = 0; i < xia; i++) rp[i] = !ins_Sb(&set, GetU(x,i)); + for (usz i = 0; i < xia; i++) bitp_set(rp, i, !ins_Sb(&set, GetU(x,i))); free_Sb(set); dec(x); return r; } @@ -264,8 +264,8 @@ B memberOf_c2(B t, B w, B x) { SGetU(x) SGetU(w) for (usz i = 0; i < xia; i++) mk_Sb(&set, GetU(x,i), &had); - i8* rp; B r = m_i8arrv(&rp, wia); - for (usz i = 0; i < wia; i++) rp[i] = has_Sb(set, GetU(w,i)); + u64* rp; B r = m_bitarrv(&rp, wia); + for (usz i = 0; i < wia; i++) bitp_set(rp, i, has_Sb(set, GetU(w,i))); free_Sb(set); dec(w);dec(x); return r; } diff --git a/src/builtins/grade.h b/src/builtins/grade.h index ee025c18..55c07d5a 100644 --- a/src/builtins/grade.h +++ b/src/builtins/grade.h @@ -19,7 +19,17 @@ B GRADE_CAT(c1)(B t, B x) { u8 xe = TI(x,elType); i32* rp; B r = m_i32arrv(&rp, ia); - if (xe==el_i8) { + if (xe==el_bit) { + u64* xp = bitarr_ptr(x); + u64 sum = bit_sum(xp, ia); + u64 r0 = 0; + u64 r1 = GRADE_UD(ia-sum, sum); + for (usz i = 0; i < ia; i++) { + if (bitp_get(xp,i)^GRADE_UD(0,1)) rp[r1++] = i; + else rp[r0++] = i; + } + dec(x); return r; + } else if (xe==el_i8) { i8* xp = i8any_ptr(x); i32 min=-128, range=256; TALLOC(usz, tmp, range+1); diff --git a/src/builtins/internal.c b/src/builtins/internal.c index 50241801..296a6f81 100644 --- a/src/builtins/internal.c +++ b/src/builtins/internal.c @@ -69,7 +69,8 @@ B info_c1(B t, B x) { F(Ac32) F(Sc32) F(Ac32Inc) F(Sc32Inc) \ F(Af64) F(Sf64) F(Af64Inc) F(Sf64Inc) \ F(Ah) F(Sh) F(AhInc) F(ShInc) \ - F(Af) F(Sf) F(AfInc) F(SfInc) + F(Af) F(Sf) F(AfInc) F(SfInc) \ + F(Ab) F(AbInc) #define F(X) static B v_##X; FOR_VARIATION(F) @@ -95,19 +96,21 @@ B listVariations_c2(B t, B w, B x) { B xf = getFillQ(x); bool ah = c_rmFill || noFill(xf); bool ai8=false, ai16=false, ai32=false, af64=false, - ac8=false, ac16=false, ac32=false; + ac8=false, ac16=false, ac32=false, abit=false; usz xia = a(x)->ia; SGetU(x) if (isNum(xf)) { - f64 min=0, max=0; - if (xe==el_i8) { } + i32 min=I32_MAX, max=I32_MIN; + if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for (usz i = 0; i < xia; i++) { if (xp[i]>max) max=xp[i]; if (xp[i]max) max=xp[i]; if (xp[i]max) max=xp[i]; if (xp[i]max) max=xp[i]; if (xp[i]max) max=xp[i]; if (xp[i]max) max=c.f; if (c.f=0 && max<=1; + onlyF64: af64 = true; } else if (isC32(xf)) { u32 max = 0; @@ -122,6 +125,7 @@ B listVariations_c2(B t, B w, B x) { } noSpec:; B r = emptyHVec(); + if(abit) { r=vec_add(r,inc(v_Ab )); if(c_incr) { r=vec_add(r,inc(v_AbInc )); } } if(ai8 ) { r=vec_add(r,inc(v_Ai8 ));r=vec_add(r,inc(v_Si8 )); if(c_incr) { r=vec_add(r,inc(v_Ai8Inc ));r=vec_add(r,inc(v_Si8Inc )); } } if(ai16) { 
r=vec_add(r,inc(v_Ai16));r=vec_add(r,inc(v_Si16)); if(c_incr) { r=vec_add(r,inc(v_Ai16Inc));r=vec_add(r,inc(v_Si16Inc)); } } if(ai32) { r=vec_add(r,inc(v_Ai32));r=vec_add(r,inc(v_Si32)); if(c_incr) { r=vec_add(r,inc(v_Ai32Inc));r=vec_add(r,inc(v_Si32Inc)); } } @@ -173,7 +177,8 @@ B variation_c2(B t, B w, B x) { if (*wp == 'A' || *wp == 'S') { bool slice = *wp == 'S'; wp++; - if (u8_get(&wp, wpE, "i8" )) res = taga(cpyI8Arr(inc(x))); + if (u8_get(&wp, wpE, "b" )) res = taga(cpyBitArr(inc(x))); + else if (u8_get(&wp, wpE, "i8" )) res = taga(cpyI8Arr(inc(x))); else if (u8_get(&wp, wpE, "i16")) res = taga(cpyI16Arr(inc(x))); else if (u8_get(&wp, wpE, "i32")) res = taga(cpyI32Arr(inc(x))); else if (u8_get(&wp, wpE, "c8" )) res = taga(cpyC8Arr(inc(x))); diff --git a/src/builtins/md1.c b/src/builtins/md1.c index 58ebd6eb..2ddff559 100644 --- a/src/builtins/md1.c +++ b/src/builtins/md1.c @@ -96,6 +96,8 @@ B scan_c1(Md1D* d, B x) { B f = d->f; if (xr==1 && xe<=el_f64 && isFun(f) && v(f)->flags) { u8 rtid = v(f)->flags-1; if (rtid==0) { // + + if (iaf; if (xe==el_i32) { i32* xp=i32any_ptr(x); i32* rp; B r=m_i32arrv(&rp, ia); i32 c=I32_MIN; for (usz i=0; ic)c=xp[i]; rp[i]=c; } dec(x); return r; } } if (rtid==14) { // ≠ - f64 x0 = IGetU(x,0).f; if (x0 != (i8)x0) goto base; - if (xe==el_i8 ) { i8* xp=i8any_ptr (x); i8* rp; B r=m_i8arrv(&rp, ia); i8 c=(i8)x0; rp[0]=c; for (usz i=1; itype==t_harr && reusable(x); usz i = 0; @@ -145,6 +149,7 @@ B scan_c2(Md1D* d, B w, B x) { B f = d->f; u8 rtid = v(f)->flags-1; i32 wv = o2iu(w); if (rtid==0) { // + + if (xe==el_bit) { u64* xp=bitarr_ptr(x); i32* rp; B r=m_i32arrv(&rp, ia); i64 c=wv; for (usz i=0; if; if (xe==el_i32 && wv==(i32)wv) { i32* xp=i32any_ptr(x); i32* rp; B r=m_i32arrv(&rp, ia); i32 c=wv; for (usz i=0; ic)c=xp[i]; rp[i]=c; } dec(x); return r; } } if (rtid==14) { // ≠ - if (wv != (i8)wv) goto base; - if (xe==el_i8 ) { i8* xp=i8any_ptr (x); i8* rp; B r=m_i8arrv(&rp, ia); i8 c=wv; for (usz i=0; if; if (isFun(f) && v(f)->flags && xe<=el_f64) { u8 rtid = v(f)->flags-1; if (rtid==0) { // + + if (xe==el_bit) { B r = m_f64(bit_sum(bitarr_ptr(x), ia)); dec(x); return r; } if (xe==el_i8 ) { i8* xp = i8any_ptr (x); i64 c=0; for (usz i=0; if; if (xe==el_i32) { i32* xp = i32any_ptr(x); i32 c=I32_MIN; for (usz i=0; ic) c=xp[i]; dec(x); return m_i32(c); } } if (rtid==11) { // ∨ - if (xe==el_i8 ) { i8* xp = i8any_ptr (x); bool q=0; for (usz i=0; if; i32 wi = o2iu(w); u8 rtid = v(f)->flags-1; if (rtid==0) { // + + if (xe==el_bit) { B r = m_f64(wi + bit_sum(bitarr_ptr(x), ia)); dec(x); return r; } if (xe==el_i8 ) { i8* xp = i8any_ptr (x); i64 c=wi; for (usz i=0; if; if (xe==el_i32) { i32* xp = i32any_ptr(x); i32 c=wi; for (usz i=0; ic) c=xp[i]; dec(x); return m_i32(c); } } if (rtid==11 && (wi&1)==wi) { // ∨ + if (xe==el_bit) { u64* xp = bitarr_ptr(x); bool r=0; if (wi) r=1; else for (usz i=0; iia; - #define CASE(T,E) if (TI(x,elType)==el_##T) { \ + u8 xe = TI(x,elType); + #define CASE(T,E) if (xe==el_##T) { \ E* rp; B r = m_##T##arrc(&rp, w); \ E* xp = T##any_ptr(x); \ - for (usz i = 0; i < wia; i++) { \ - rp[i] = xp[WRAP(wp[i], xia, thrF("⊏: Indexing out-of-bounds (%i∊𝕨, %s≡≠𝕩)", wp[i], xia))]; \ - } \ + for (usz i = 0; i < wia; i++) rp[i] = xp[WRAP(wp[i], xia, thrF("⊏: Indexing out-of-bounds (%i∊𝕨, %s≡≠𝕩)", wp[i], xia))]; \ dec(w); dec(x); return r; \ } - #define TYPE(W) { \ - W* wp = W##any_ptr(w); \ - CASE(i8,i8) CASE(i16,i16) CASE(i32,i32) \ + #define TYPE(W) { W* wp = W##any_ptr(w); \ + if (xe==el_bit) { u64* xp=bitarr_ptr(x); \ + u64* rp; B r = 
m_bitarrc(&rp, w); \ + for (usz i = 0; i < wia; i++) bitp_set(rp, i, bitp_get(xp, WRAP(wp[i], xia, thrF("⊏: Indexing out-of-bounds (%i∊𝕨, %s≡≠𝕩)", wp[i], xia)))); \ + dec(w); dec(x); return r; \ + } \ + CASE(i8,i8) CASE(i16,i16) CASE(i32,i32) \ CASE(c8,u8) CASE(c16,u16) CASE(c32,u32) CASE(f64,f64) \ usz i=0; HArr_p r = m_harrs(wia, &i); \ if (v(x)->type==t_harr || v(x)->type==t_hslice) { \ @@ -292,7 +297,13 @@ B select_c2(B t, B w, B x) { for (; i < wia; i++) r.a[i] = Get(x, WRAP(wp[i], xia, thrF("⊏: Indexing out-of-bounds (%i∊𝕨, %s≡≠𝕩)", wp[i], xia))); \ dec(x); return withFill(harr_fcd(r,w),xf); \ } - if (TI(w,elType)==el_i8) TYPE(i8) + if (TI(w,elType)==el_bit && xia>=2) { + SGetU(x) + B r = bit_sel(w, GetU(x,0), true, GetU(x,1), true); + dec(x); + return withFill(r, xf); + } + else if (TI(w,elType)==el_i8) TYPE(i8) else if (TI(w,elType)==el_i16) TYPE(i16) else if (TI(w,elType)==el_i32) TYPE(i32) else { @@ -360,7 +371,14 @@ B slash_c1(B t, B x) { } i32* rp; B r = m_i32arrv(&rp, s); u8 xe = TI(x,elType); - if (xe==el_i8) { + if (xe==el_bit) { + u64* xp = bitarr_ptr(x); + while (xia>0 && !bitp_get(xp,xia-1)) xia--; + for (u64 i = 0; i < xia; i++) { + *rp = i; + rp+= bitp_get(xp, i); + } + } else if (xe==el_i8) { i8* xp = i8any_ptr(x); while (xia>0 && !xp[xia-1]) xia--; for (u64 i = 0; i < xia; i++) { @@ -408,6 +426,37 @@ B slash_c2(B t, B w, B x) { B xf = getFillQ(x); usz ri = 0; + if (TI(w,elType)==el_bit) { + u64* wp = bitarr_ptr(w); + while (wia>0 && !bitp_get(wp,wia-1)) wia--; + usz wsum = bit_sum(wp, wia); + B r; + switch(TI(x,elType)) { default: UD; + case el_bit: { u64* xp = bitarr_ptr(x); u64* rp; r = m_bitarrv(&rp,wsum); for (usz i=0; i>SIGN) thrM("/: 𝕨 must consist of natural numbers"); \ + if (TI(x,elType)==el_bit) { \ + u64* xp = bitarr_ptr(x); u64 ri=0; \ + u64* rp; B r = m_bitarrv(&rp, wsum); \ + if (or<2) for (usz i = 0; i < wia; i++) { \ + bitp_set(rp, ri, bitp_get(xp,i)); \ + ri+= wp[i]; \ + } else for (usz i = 0; i < wia; i++) { \ + WT cw = wp[i]; bool cx = bitp_get(xp,i); \ + for (i64 j = 0; j < cw; j++) bitp_set(rp, ri++, cx); \ + } \ + dec(w); dec(x); return r; \ + } \ CASE(WT,i8) CASE(WT,i16) CASE(WT,i32) CASE(WT,f64) \ SLOW2("𝕨/𝕩", w, x); \ HArr_p r = m_harrs(wsum, &ri); SGetU(x) \ @@ -882,7 +943,8 @@ B reverse_c1(B t, B x) { u8 xe = TI(x,elType); usz xia = a(x)->ia; if (rnk(x)==1 && xea[cw] = cr; } dec(w); dec(rep); FREE_CHECK; return taga(xn); + } else if (me==el_bit) { + BitArr* xn = reuse? 
toBitArr(REUSE(x)) : cpyBitArr(x); + rep = taga(toBitArr(rep)); u64* rp = bitarr_ptr(rep); + for (usz i = 0; i < wia; i++) { + i64 cw = wp[i]; if (RARE(cw<0)) cw+= (i64)xia; + bool cr = bitp_get(rp, i); + EQ(cr != bitp_get(xn->a,cw)); + bitp_set(xn->a,cw,cr); + } + dec(w); dec(rep); FREE_CHECK; return taga(xn); } else UD; } if (reusable(x) && xe==re) { diff --git a/src/builtins/sysfn.c b/src/builtins/sysfn.c index 6f460711..600b6df5 100644 --- a/src/builtins/sysfn.c +++ b/src/builtins/sysfn.c @@ -124,15 +124,27 @@ B grLen_both(i64 ria, B x) { } if (ria > (i64)(USZ_MAX-1)) thrOOM(); ria++; - i32* rp; B r = m_i32arrv(&rp, ria); + B r; + { + u64* rp; r = m_bitarrv(&rp, ria); + for (usz i = 0; i < BIT_N(ria); i++) rp[i] = 0; + for (usz i = 0; i < ia; i++) { + i64 n = o2i64u(GetU(x, i)); assert(n>=-1); + if (n>=0) { + if (bitp_get(rp,n)) { dec(r); goto r_i32; } + bitp_set(rp,n,1); + } + } + goto r_r; + } + r_i32:; + i32* rp; r = m_i32arrv(&rp, ria); for (usz i = 0; i < ria; i++) rp[i] = 0; for (usz i = 0; i < ia; i++) { - i64 n = o2i64u(GetU(x, i)); + i64 n = o2i64u(GetU(x, i)); assert(n>=-1); if (n>=0) rp[n]++; - assert(n>=-1); } - dec(x); - return r; + r_r: dec(x); return r; } B grLen_c1(B t, B x) { return grLen_both( -1, x); } // assumes valid arguments B grLen_c2(B t, B w, B x) { return grLen_both(o2i64u(w)-1, x); } // assumes valid arguments diff --git a/src/core/fillarr.c b/src/core/fillarr.c index 9d0ece47..94ee51f3 100644 --- a/src/core/fillarr.c +++ b/src/core/fillarr.c @@ -5,8 +5,8 @@ B asFill(B x) { // consumes u8 xe = TI(x,elType); usz ia = a(x)->ia; if (elNum(xe)) { - i8* rp; B r = m_i8arrc(&rp, x); - for (usz i = 0; i < ia; i++) rp[i] = 0; + u64* rp; B r = m_bitarrc(&rp, x); + for (usz i = 0; i < BIT_N(ia); i++) rp[i] = 0; dec(x); return r; } diff --git a/src/core/fillarr.h b/src/core/fillarr.h index e804d66a..7f5a0668 100644 --- a/src/core/fillarr.h +++ b/src/core/fillarr.h @@ -29,7 +29,7 @@ static bool fillEqual(B w, B x) { static B getFillR(B x) { // doesn't consume; can return bi_noFill if (isArr(x)) { switch(TI(x,elType)) { default: UD; - case el_i8: case el_i16: case el_i32: case el_f64: return m_i32(0); + case el_i8: case el_i16: case el_i32: case el_f64: case el_bit: return m_i32(0); case el_c8: case el_c16: case el_c32: return m_c32(' '); case el_B:; u8 t = v(x)->type; @@ -86,6 +86,7 @@ static B m_atomUnit(B x) { Arr* r; i32 xi = (i32)x.f; if (RARE(xi!=x.f)) { f64* rp; r = m_f64arrp(&rp, 1); rp[0] = x.f; } + else if (q_ibit(xi)) { u64* rp; r = m_bitarrp(&rp, 1); rp[0] = bitx(x); } else if (xi==(i8 )xi) { i8* rp; r = m_i8arrp (&rp, 1); rp[0] = xi; } else if (xi==(i16)xi) { i16* rp; r = m_i16arrp(&rp, 1); rp[0] = xi; } else { i32* rp; r = m_i32arrp(&rp, 1); rp[0] = xi; } diff --git a/src/core/harr.c b/src/core/harr.c index 29456b5a..3834cc9c 100644 --- a/src/core/harr.c +++ b/src/core/harr.c @@ -65,7 +65,8 @@ HArr* cpyHArr(B x) { usz ia = a(x)->ia; HArr_p r = m_harrUc(x); u8 xe = TI(x,elType); - if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i> 6) + +static inline void bitp_set(u64* arr, u64 n, bool v) { + u64 m = ((u64)1)<<(n&63); + if (v) arr[n>>6]|= m; + else arr[n>>6]&= ~m; + // arr[n>>6] = (arr[n>>6]&(~m)) | (((u64)v)<<(n&63)); +} +static inline bool bitp_get(u64* arr, u64 n) { + return (arr[n>>6] >> (n&63)) & 1; +} +static inline u64 bitx(B x) { // repeats the boolean across all 64 bits + return o2bu(x)? 
~(u64)0 : 0; +} + +// BitArr +typedef struct BitArr { + struct Arr; + u64 a[]; +} BitArr; +#define BITARR_SZ(IA) fsizeof(BitArr, a, u64, BIT_N(IA)) +static B m_bitarrv(u64** p, usz ia) { + BitArr* r = m_arr(BITARR_SZ(ia), t_bitarr, ia); + arr_shVec((Arr*)r); + *p = r->a; + return taga(r); +} +static B m_bitarrc(u64** p, B x) { assert(isArr(x)); + BitArr* r = m_arr(BITARR_SZ(a(x)->ia), t_bitarr, a(x)->ia); + *p = r->a; + arr_shCopy((Arr*)r, x); + return taga(r); +} +static Arr* m_bitarrp(u64** p, usz ia) { + BitArr* r = m_arr(BITARR_SZ(ia), t_bitarr, ia); + *p = r->a; + return (Arr*)r; +} +static u64* bitarr_ptr(B x) { VTY(x, t_bitarr); return c(BitArr,x)->a; } I8Arr* cpyI8Arr (B x); // consumes I16Arr* cpyI16Arr(B x); // consumes I32Arr* cpyI32Arr(B x); // consumes F64Arr* cpyF64Arr(B x); // consumes +BitArr* cpyBitArr(B x); // consumes // all consume x static I8Arr* toI8Arr (B x) { return v(x)->type==t_i8arr ? c(I8Arr, x) : cpyI8Arr (x); } static I16Arr* toI16Arr(B x) { return v(x)->type==t_i16arr? c(I16Arr,x) : cpyI16Arr(x); } static I32Arr* toI32Arr(B x) { return v(x)->type==t_i32arr? c(I32Arr,x) : cpyI32Arr(x); } static F64Arr* toF64Arr(B x) { return v(x)->type==t_f64arr? c(F64Arr,x) : cpyF64Arr(x); } +static BitArr* toBitArr(B x) { return v(x)->type==t_bitarr? c(BitArr,x) : cpyBitArr(x); } static B toI8Any (B x) { u8 t=v(x)->type; return t==t_i8arr || t==t_i8slice ? x : taga(cpyI8Arr (x)); } static B toI16Any(B x) { u8 t=v(x)->type; return t==t_i16arr || t==t_i16slice? x : taga(cpyI16Arr(x)); } @@ -37,12 +77,23 @@ static B toI32Any(B x) { u8 t=v(x)->type; return t==t_i32arr || t==t_i32slice? x static B toF64Any(B x) { u8 t=v(x)->type; return t==t_f64arr || t==t_f64slice? x : taga(cpyF64Arr(x)); } +B m_cai32(usz ia, i32* a); +B m_caf64(usz sz, f64* a); + +static i64 bit_sum(u64* x, u64 am) { + i64 r = 0; + for (u64 i = 0; i < (am>>6); i++) r+= POPC(x[i]); + if (am&63) r+= POPC(x[am>>6]<<(64-am & 63)); + return r; +} + static i64 isum(B x) { // doesn't consume; may error assert(isArr(x)); i64 r = 0; usz xia = a(x)->ia; u8 xe = TI(x,elType); - if (xe==el_i8 ) { i8* p = i8any_ptr (x); for (usz i = 0; i < xia; i++) r+= p[i]; } + if (xe==el_bit) return bit_sum(bitarr_ptr(x), xia); + else if (xe==el_i8 ) { i8* p = i8any_ptr (x); for (usz i = 0; i < xia; i++) r+= p[i]; } else if (xe==el_i16) { i16* p = i16any_ptr(x); for (usz i = 0; i < xia; i++) if (addOn(r,p[i])) goto err; } else if (xe==el_i32) { i32* p = i32any_ptr(x); for (usz i = 0; i < xia; i++) if (addOn(r,p[i])) goto err; } else if (xe==el_f64) { diff --git a/src/core/stuff.c b/src/core/stuff.c index b3dcb852..f3a43b4f 100644 --- a/src/core/stuff.c +++ b/src/core/stuff.c @@ -486,19 +486,20 @@ bool isPureFn(B x) { // doesn't consume B num_squeeze(B x) { usz ia = a(x)->ia; u8 xe = TI(x,elType); - assert(xe!=el_bit); - - if (xe==el_i8) goto r_x; + if (xe==el_bit) goto r_x; // TODO fast paths for xe>31); + i32 sgn = (u32)(c>>31); + or|= ((u32)c) ^ sgn; + neg|= sgn; } goto r_or; } @@ -511,7 +512,9 @@ B num_squeeze(B x) { goto r_f64; } i32 c = o2iu(xp[i]); - or|= ((u32)c) ^ (u32)(c>>31); + i32 sgn = (u32)(c>>31); + or|= ((u32)c) ^ sgn; + neg|= sgn; } goto r_or; } @@ -524,14 +527,18 @@ B num_squeeze(B x) { goto r_f64; } i32 c = o2iu(cr); - or|= ((u32)c) ^ (u32)(c>>31); + i32 sgn = (u32)(c>>31); + or|= ((u32)c) ^ sgn; + neg|= sgn; } r_or: - if (or<=(u32)I8_MAX ) goto r_i8; + if (!neg && or<=1) goto r_bit; + else if (or<=(u32)I8_MAX ) goto r_i8; else if (or<=(u32)I16_MAX) goto r_i16; else goto r_i32; r_x : return FL_SET(x, fl_squoze); + 
r_bit: return FL_SET(taga(toBitArr(x)), fl_squoze); r_i8 : return FL_SET(toI8Any (x), fl_squoze); r_i16: return FL_SET(toI16Any(x), fl_squoze); r_i32: return FL_SET(toI32Any(x), fl_squoze); @@ -582,7 +589,7 @@ B chr_squeeze(B x) { B any_squeeze(B x) { assert(isArr(x)); if (FL_HAS(x,fl_squoze)) return x; - if (a(x)->ia==0) return FL_SET(x, fl_squoze); + if (a(x)->ia==0) return FL_SET(x, fl_squoze); // TODO return a version of the smallest type SGetU(x) B x0 = GetU(x, 0); if (isNum(x0)) return num_squeeze(x); @@ -700,6 +707,8 @@ Arr* g_a(B x) { return a(x); } B g_t (void* x) { return tag(x,OBJ_TAG); } B g_ta(void* x) { return tag(x,ARR_TAG); } B g_tf(void* x) { return tag(x,FUN_TAG); } +void g_p(B x) { print(x); putchar(10); fflush(stdout); } +void g_pv(void* x) { print(tag(x,OBJ_TAG)); putchar(10); fflush(stdout); } #ifdef DEBUG #ifdef OBJ_COUNTER @@ -749,16 +758,19 @@ B g_tf(void* x) { return tag(x,FUN_TAG); } void warn_slow1(char* s, B x) { if (isArr(x) && a(x)->ia<100) return; printf("slow %s: ", s); warn_ln(x); + fflush(stdout); } void warn_slow2(char* s, B w, B x) { if ((isArr(w)||isArr(x)) && (!isArr(w) || a(w)->ia<50) && (!isArr(x) || a(x)->ia<50)) return; printf("slow %s:\n 𝕨: ", s); warn_ln(w); printf(" 𝕩: "); warn_ln(x); + fflush(stdout); } void warn_slow3(char* s, B w, B x, B y) { if ((isArr(w)||isArr(x)) && (!isArr(w) || a(w)->ia<50) && (!isArr(x) || a(x)->ia<50)) return; printf("slow %s:\n 𝕨: ", s); warn_ln(w); printf(" 𝕩: "); warn_ln(x); printf(" f: "); warn_ln(y); + fflush(stdout); } #endif diff --git a/src/core/stuff.h b/src/core/stuff.h index 7ec9aa16..335036cb 100644 --- a/src/core/stuff.h +++ b/src/core/stuff.h @@ -89,6 +89,8 @@ static bool eqShape(B w, B x) { assert(isArr(w)); assert(isArr(x)); return eqShPrefix(wsh, xsh, wr); } +B bit_sel(B b, B e0, bool h0, B e1, bool h1); // consumes b; h0/h1 can be true if unknown + static B m_v1(B a ); // consumes all static B m_v2(B a, B b ); // consumes all @@ -108,7 +110,7 @@ static usz uszMul(usz a, usz b) { } static u8 selfElType(B x) { // guaranteed to fit fill - if (isF64(x)) return q_i16(x)? (q_i8(x)? el_i8 : el_i16) : (q_i32(x)? el_i32 : el_f64); + if (isF64(x)) return q_i8(x)? (q_bit(x)? el_bit : el_i8) : (q_i16(x)? el_i16 : q_i32(x)? el_i32 : el_f64); if (isC32(x)) return LIKELY(q_c8(x))? el_c8 : q_c16(x)? el_c16 : el_c32; return el_B; } diff --git a/src/core/tyarr.c b/src/core/tyarr.c index db9baa31..4d1bbcfe 100644 --- a/src/core/tyarr.c +++ b/src/core/tyarr.c @@ -1,4 +1,5 @@ #include "../core.h" +#include "../utils/mut.h" B m_i8(i8 x) { return m_i32(x); } B m_i16(i16 x) { return m_i32(x); } B m_c8(u8 x) { return m_c32(x); } B m_c16(u16 x) { return m_c32(x); } @@ -40,7 +41,8 @@ NOINLINE B m_str32(u32* s) { E* rp; Arr* r = m_##E##arrp(&rp, ia); \ arr_shCopy(r, x); \ u8 xe = TI(x,elType); \ - if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; iia; + u64* rp; Arr* r = m_bitarrp(&rp, ia); + arr_shCopy(r, x); + u8 xe = TI(x,elType); + if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; itype==t_bitarr); return bitp_get(((BitArr*)x)->a, n)? 
m_f64(1) : m_f64(0); } +static bool bitarr_canStore(B x) { return q_bit(x); } + +static void bitarr_init() { + TIi(t_bitarr,get) = bitarr_get; + TIi(t_bitarr,getU) = bitarr_get; + TIi(t_bitarr,slice) = bitarr_slice; + TIi(t_bitarr,freeO) = tyarr_freeO; + TIi(t_bitarr,freeF) = tyarr_freeF; + TIi(t_bitarr,visit) = noop_visit; + TIi(t_bitarr,print) = arr_print; + TIi(t_bitarr,isArr) = true; + TIi(t_bitarr,arrD1) = true; + TIi(t_bitarr,elType) = el_bit; + TIi(t_bitarr,canStore) = bitarr_canStore; +} + void tyarr_init() { - i8arr_init(); i16arr_init(); i32arr_init(); + i8arr_init(); i16arr_init(); i32arr_init(); bitarr_init(); c8arr_init(); c16arr_init(); c32arr_init(); f64arr_init(); - { i8* tmp; bi_emptyIVec = m_i8arrv(&tmp, 0); gc_add(bi_emptyIVec); } - { u8* tmp; bi_emptyCVec = m_c8arrv(&tmp, 0); gc_add(bi_emptyCVec); } + { u64* tmp; bi_emptyIVec = m_bitarrv(&tmp, 0); gc_add(bi_emptyIVec); } + { u8* tmp; bi_emptyCVec = m_c8arrv (&tmp, 0); gc_add(bi_emptyCVec); } Arr* emptySVec = m_fillarrp(0); arr_shVec(emptySVec); fillarr_setFill(emptySVec, emptyCVec()); diff --git a/src/h.h b/src/h.h index e2cdf451..0ec98449 100644 --- a/src/h.h +++ b/src/h.h @@ -188,13 +188,14 @@ typedef union B { /*10*/ F(md1D) F(md2D) F(md2H) \ \ /*13*/ F(harr ) F(i8arr ) F(i16arr ) F(i32arr ) F(fillarr ) F(c8arr ) F(c16arr ) F(c32arr ) F(f64arr ) \ - /*19*/ F(hslice) F(i8slice) F(i16slice) F(i32slice) F(fillslice) F(c8slice) F(c16slice) F(c32slice) F(f64slice) \ + /*22*/ F(hslice) F(i8slice) F(i16slice) F(i32slice) F(fillslice) F(c8slice) F(c16slice) F(c32slice) F(f64slice) \ + /*31*/ F(bitarr) \ \ - /*25*/ F(comp) F(block) F(body) F(scope) F(scopeExt) F(blBlocks) \ - /*31*/ F(ns) F(nsDesc) F(fldAlias) F(vfyObj) F(hashmap) F(temp) F(nfn) F(nfnDesc) \ - /*38*/ F(freed) F(harrPartial) \ + /*32*/ F(comp) F(block) F(body) F(scope) F(scopeExt) F(blBlocks) \ + /*38*/ F(ns) F(nsDesc) F(fldAlias) F(vfyObj) F(hashmap) F(temp) F(nfn) F(nfnDesc) \ + /*46*/ F(freed) F(harrPartial) \ \ - /*40*/ IF_RT_WRAP(F(funWrap) F(md1Wrap) F(md2Wrap)) + /*48*/ IF_RT_WRAP(F(funWrap) F(md1Wrap) F(md2Wrap)) enum Type { #define F(X) t_##X, @@ -203,8 +204,8 @@ enum Type { t_COUNT }; -enum ElType { // a⌈b shall return the type that can store both, if possible; any x<=el_f64 is an integer type - el_bit=0, // unused; just here for completeness of ElType +enum ElType { // a⌈b shall return the type that can store both, if possible + el_bit=0, el_i8 =1, el_i16=2, el_i32=3, @@ -384,6 +385,8 @@ static usz o2su (B x) { return (usz)x.f; } static f64 o2fu (B x) { return x.f; } static i64 o2i64u(B x) { return (i64)x.f; } static bool o2b (B x) { usz t=o2s(x); if(t!=0&t!=1)thrM("Expected boolean"); return t; } +static bool o2bu (B x) { return o2s(x); } +static bool q_bit(B x) { return isNum(x) & (x.f==0 | x.f==1); } static bool q_c8 (B x) { return isC32(x) && ((u32)x.u) == ((u8 )x.u); } static bool q_c16(B x) { return isC32(x) && ((u32)x.u) == ((u16)x.u); } static bool q_c32(B x) { return isC32(x); } @@ -394,6 +397,9 @@ static bool q_i64(B x) { return isF64(x) && x.f==(f64)(i64)x.f; } static bool q_f64(B x) { return isF64(x); } static bool q_N (B x) { return x.u==bi_N.u; } // is · static bool noFill(B x) { return x.u == bi_noFill.u; } +static bool q_ibit(i64 x) { return x==0 | x==1; } +static bool q_ubit(u64 x) { return x==0 | x==1; } +static bool q_fbit(f64 x) { return x==0 | x==1; } typedef struct Slice { diff --git a/src/opt/single.c b/src/opt/single.c index 05d7a3c4..6619f621 100644 --- a/src/opt/single.c +++ b/src/opt/single.c @@ -12,6 +12,7 @@ #include 
"../utils/file.c" #include "../utils/mut.c" #include "../utils/each.c" +#include "../utils/bits.c" #include "../builtins/fns.c" #include "../builtins/sfns.c" #include "../builtins/sysfn.c" diff --git a/src/utils/bits.c b/src/utils/bits.c new file mode 100644 index 00000000..fcef28d0 --- /dev/null +++ b/src/utils/bits.c @@ -0,0 +1,32 @@ +#include "../core.h" + +NOINLINE B bit_sel(B b, B e0, bool h0, B e1, bool h1) { + u8 t0 = selfElType(e0); + u8 t1 = selfElType(e1); + if (!h0) t0=t1; // TODO just do separate impls for !h0 and !h1 + if (!h1) t1=t0; + u64* bp = bitarr_ptr(b); + usz ia = a(b)->ia; + if (elNum(t0) && elNum(t1)) { B r; + f64 f0 = o2fu(e0); i32 i0 = f0; + f64 f1 = o2fu(e1); i32 i1 = f1; + if (t0<=el_i8 & t1<=el_i8 ) { i8* rp; r=m_i8arrc (&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? i1 : i0; } + else if (t0<=el_i16 & t1<=el_i16) { i16* rp; r=m_i16arrc(&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? i1 : i0; } + else if (t0<=el_i32 & t1<=el_i32) { i32* rp; r=m_i32arrc(&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? i1 : i0; } + else { f64* rp; r=m_f64arrc(&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? f1 : f0; } + dec(b); return r; + } else if (elChr(t0) && elChr(t1)) { B r; u32 u0 = o2cu(e0); u32 u1 = o2cu(e1); + if (t0<=el_c8 & t1<=el_c8 ) { u8* rp; r=m_c8arrc (&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? u1 : u0; } + else if (t0<=el_c16 & t1<=el_c16) { u16* rp; r=m_c16arrc(&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? u1 : u0; } + else { u32* rp; r=m_c32arrc(&rp, b); for (usz i = 0; i < ia; i++) rp[i] = bitp_get(bp,i)? u1 : u0; } + dec(b); return r; + } + HArr_p r = m_harrUc(b); + for (usz i = 0; i < ia; i++) r.a[i] = bitp_get(bp,i)? e1 : e0; + + u64 c1 = bit_sum(bp, ia); + u64 c0 = ia-c1; + incBy(e0,c0); + incBy(e1,c1); + dec(b); return r.b; +} diff --git a/src/utils/mut.c b/src/utils/mut.c index 7254f4cf..7ca36f01 100644 --- a/src/utils/mut.c +++ b/src/utils/mut.c @@ -21,6 +21,7 @@ NOINLINE void mut_to(Mut* m, u8 n) { } #endif switch(n) { default: UD; + case el_bit: { BitArr* t=cpyBitArr(taga(m->val)); m->val=(Arr*)t; m->abit=t->a; return; } case el_i8: { I8Arr* t=cpyI8Arr (taga(m->val)); m->val=(Arr*)t; m->ai8 =t->a; return; } case el_i16: { I16Arr* t=cpyI16Arr(taga(m->val)); m->val=(Arr*)t; m->ai16=t->a; return; } case el_i32: { I32Arr* t=cpyI32Arr(taga(m->val)); m->val=(Arr*)t; m->ai32=t->a; return; } diff --git a/src/utils/mut.h b/src/utils/mut.h index 03e79f9b..96c5a3e8 100644 --- a/src/utils/mut.h +++ b/src/utils/mut.h @@ -20,8 +20,7 @@ typedef struct Mut { union { i8* ai8; i16* ai16; i32* ai32; u8* ac8; u16* ac16; u32* ac32; - f64* af64; - B* aB; + f64* af64; u64* abit; B* aB; }; } Mut; #define MAKE_MUT(N, IA) Mut N##_val; N##_val.type = el_MAX; N##_val.ia = (IA); Mut* N = &N##_val; @@ -33,6 +32,7 @@ static void mut_init(Mut* m, u8 n) { usz sz; // hack around inlining of the allocator too many times switch(n) { default: UD; + case el_bit: ty = t_bitarr; sz = BITARR_SZ( ia); break; case el_i8: ty = t_i8arr ; sz = TYARR_SZ(I8, ia); break; case el_i16: ty = t_i16arr; sz = TYARR_SZ(I16,ia); break; case el_i32: ty = t_i32arr; sz = TYARR_SZ(I32,ia); break; @@ -51,7 +51,7 @@ static void mut_init(Mut* m, u8 n) { switch(n) { default: UD; // gcc generates horrible code for this (which should just be two instructions), but that's what gcc does case el_i8: m->ai8 = ((I8Arr*)a)->a; break; case el_i16: m->ai16 = ((I16Arr*)a)->a; break; case el_i32: m->ai32 = ((I32Arr*)a)->a; break; case 
el_c8: m->ac8 = ((C8Arr*)a)->a; break; case el_c16: m->ac16 = ((C16Arr*)a)->a; break; case el_c32: m->ac32 = ((C32Arr*)a)->a; break; - case el_f64: m->af64 = ((F64Arr*)a)->a; break; + case el_f64: m->af64 = ((F64Arr*)a)->a; break; case el_bit: m->abit = ((BitArr*)a)->a; break; } } void mut_to(Mut* m, u8 n); @@ -88,6 +88,7 @@ static void mut_set(Mut* m, usz ms, B x) { // consumes x; sets m[ms] to x again:; switch(m->type) { default: UD; case el_MAX: goto change; + case el_bit: if (!q_bit(x)) goto change; bitp_set(m->abit, ms, o2bu(x)); return; case el_i8: if (!q_i8 (x)) goto change; m->ai8 [ms] = o2iu(x); return; case el_i16: if (!q_i16(x)) goto change; m->ai16[ms] = o2iu(x); return; case el_i32: if (!q_i32(x)) goto change; m->ai32[ms] = o2iu(x); return; @@ -106,6 +107,7 @@ static void mut_set(Mut* m, usz ms, B x) { // consumes x; sets m[ms] to x } static void mut_setG(Mut* m, usz ms, B x) { // consumes; sets m[ms] to x, assumes the current type can store it switch(m->type) { default: UD; + case el_bit: { assert(q_bit(x)); bitp_set(m->abit, ms, o2bu(x)); return; } case el_i8 : { assert(q_i8 (x)); m->ai8 [ms] = o2iu(x); return; } case el_i16: { assert(q_i16(x)); m->ai16[ms] = o2iu(x); return; } case el_i32: { assert(q_i32(x)); m->ai32[ms] = o2iu(x); return; } @@ -124,6 +126,7 @@ static void mut_rm(Mut* m, usz ms) { // clears the object at position ms } static B mut_getU(Mut* m, usz ms) { switch(m->type) { default: UD; + case el_bit: return m_i32(bitp_get(m->abit, ms)); case el_i8: return m_i32(m->ai8 [ms]); case el_i16: return m_i32(m->ai16[ms]); case el_i32: return m_i32(m->ai32[ms]); @@ -137,6 +140,7 @@ static B mut_getU(Mut* m, usz ms) { static void mut_fillG(Mut* m, usz ms, B x, usz l) { // doesn't consume x switch(m->type) { default: UD; + case el_bit: { assert(q_bit(x)); u64* p = m->abit; bool v = o2bu(x); for (usz i = 0; i < l; i++) bitp_set(p, ms+i, v); return; } case el_i8: { assert(q_i8 (x)); i8* p = m->ai8 +ms; i8 v = o2iu(x); for (usz i = 0; i < l; i++) p[i] = v; return; } case el_i16: { assert(q_i16(x)); i16* p = m->ai16+ms; i16 v = o2iu(x); for (usz i = 0; i < l; i++) p[i] = v; return; } case el_i32: { assert(q_i32(x)); i32* p = m->ai32+ms; i32 v = o2iu(x); for (usz i = 0; i < l; i++) p[i] = v; return; } @@ -160,25 +164,59 @@ static void mut_fill(Mut* m, usz ms, B x, usz l) { mut_fillG(m, ms, x, l); } +static void bit_cpy(u64* r, usz rs, u64* x, usz xs, usz l) { // TODO rewrite this whole thing to be all fancy + u64 i = rs; + u64 re = rs+(u64)l; + i64 d = (i64)xs-(i64)rs; + if (l>128) { + for (; i>6; + u64 ei = (re>>6) - 2; + i64 dp = d>>6; + u64 df = ((u64)d)&63u; + if (df==0) for (; ti> df) | (x[ti+dp+1] << (64-df)); + i = ti<<6; + } + for (; itype, ms, ms+l, type_repr(xt), xs, xs+l); fflush(stdout); u8 xt = v(x)->type; switch(m->type) { default: UD; - case el_i8: { i8* rp = m->ai8+ms; i8* xp = i8any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + case el_bit: bit_cpy(m->abit, ms, bitarr_ptr(x), xs, l); return; + case el_i8: { i8* rp = m->ai8+ms; + switch (xt) { default: UD; + case t_bitarr: { u64* xp = bitarr_ptr(x); for (usz i = 0; i < l; i++) rp[i] = bitp_get(xp, xs+i); return; } + case t_i8arr: case t_i8slice: { i8* xp = i8any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + } + } case el_i16: { i16* rp = m->ai16+ms; switch (xt) { default: UD; + case t_bitarr: { u64* xp = bitarr_ptr(x); for (usz i = 0; i < l; i++) rp[i] = bitp_get(xp, xs+i); return; } case t_i8arr: case t_i8slice: { i8* xp = i8any_ptr (x); for (usz i = 0; i < l; i++) 
rp[i] = xp[i+xs]; return; } case t_i16arr: case t_i16slice: { i16* xp = i16any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } } } case el_i32: { i32* rp = m->ai32+ms; switch (xt) { default: UD; + case t_bitarr: { u64* xp = bitarr_ptr(x); for (usz i = 0; i < l; i++) rp[i] = bitp_get(xp, xs+i); return; } case t_i8arr: case t_i8slice: { i8* xp = i8any_ptr (x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } case t_i16arr: case t_i16slice: { i16* xp = i16any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } case t_i32arr: case t_i32slice: { i32* xp = i32any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } } } + case el_f64: { + f64* rp = m->af64+ms; + switch (xt) { default: UD; + case t_bitarr: { u64* xp = bitarr_ptr(x); for (usz i = 0; i < l; i++) rp[i] = bitp_get(xp, xs+i); return; } + case t_i8arr: case t_i8slice: { i8* xp = i8any_ptr (x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + case t_i16arr: case t_i16slice: { i16* xp = i16any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + case t_i32arr: case t_i32slice: { i32* xp = i32any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + case t_f64arr: case t_f64slice: { f64* xp = f64any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } + } + } case el_c8: { u8* rp = m->ac8+ms; u8* xp = c8any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } case el_c16: { u16* rp = m->ac16+ms; switch (xt) { default: UD; @@ -193,15 +231,6 @@ static void mut_copyG(Mut* m, usz ms, B x, usz xs, usz l) { assert(isArr(x)); case t_c32arr: case t_c32slice: { u32* xp = c32any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } } } - case el_f64: { - f64* rp = m->af64+ms; - switch (xt) { default: UD; - case t_i8arr: case t_i8slice: { i8* xp = i8any_ptr (x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } - case t_i16arr: case t_i16slice: { i16* xp = i16any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } - case t_i32arr: case t_i32slice: { i32* xp = i32any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } - case t_f64arr: case t_f64slice: { f64* xp = f64any_ptr(x); for (usz i = 0; i < l; i++) rp[i] = xp[i+xs]; return; } - } - } case el_B: { B* mpo = m->aB+ms; B* xp = arr_bptr(x); @@ -235,13 +264,14 @@ static B vec_join(B w, B x) { // consumes both u64 wsz = mm_size(v(w)); u8 wt = v(w)->type; // TODO f64∾i32, i32∾i8, c32∾c8 etc - if (wt==t_i8arr && fsizeof(I8Arr ,a,i8 ,ria)ia=ria; memcpy(i8arr_ptr (w)+wia, i8any_ptr (x), xia*1); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_i16arr && fsizeof(I16Arr,a,i16,ria)ia=ria; memcpy(i16arr_ptr(w)+wia, i16any_ptr(x), xia*2); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_i32arr && fsizeof(I32Arr,a,i32,ria)ia=ria; memcpy(i32arr_ptr(w)+wia, i32any_ptr(x), xia*4); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_c8arr && fsizeof(C8Arr ,a,u8, ria)ia=ria; memcpy(c8arr_ptr (w)+wia, c8any_ptr (x), xia*1); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_c16arr && fsizeof(C16Arr,a,u16,ria)ia=ria; memcpy(c16arr_ptr(w)+wia, c16any_ptr(x), xia*2); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_c32arr && fsizeof(C32Arr,a,u32,ria)ia=ria; memcpy(c32arr_ptr(w)+wia, c32any_ptr(x), xia*4); dec(x); return FL_KEEP(w,fl_squoze); } - if (wt==t_f64arr && fsizeof(F64Arr,a,f64,ria)ia=ria; memcpy(f64arr_ptr(w)+wia, f64any_ptr(x), xia*8); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_bitarr && BITARR_SZ( ria)ia=ria; bit_cpy(bitarr_ptr(w),wia,bitarr_ptr(x),0,xia); 
dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_i8arr && TYARR_SZ(I8, ria)ia=ria; memcpy(i8arr_ptr (w)+wia, i8any_ptr (x), xia*1); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_i16arr && TYARR_SZ(I16,ria)ia=ria; memcpy(i16arr_ptr(w)+wia, i16any_ptr(x), xia*2); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_i32arr && TYARR_SZ(I32,ria)ia=ria; memcpy(i32arr_ptr(w)+wia, i32any_ptr(x), xia*4); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_c8arr && TYARR_SZ(C8, ria)ia=ria; memcpy(c8arr_ptr (w)+wia, c8any_ptr (x), xia*1); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_c16arr && TYARR_SZ(C16,ria)ia=ria; memcpy(c16arr_ptr(w)+wia, c16any_ptr(x), xia*2); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_c32arr && TYARR_SZ(C32,ria)ia=ria; memcpy(c32arr_ptr(w)+wia, c32any_ptr(x), xia*4); dec(x); return FL_KEEP(w,fl_squoze); } + if (wt==t_f64arr && TYARR_SZ(F64,ria)ia=ria; memcpy(f64arr_ptr(w)+wia, f64any_ptr(x), xia*8); dec(x); return FL_KEEP(w,fl_squoze); } if (wt==t_harr && fsizeof(HArr,a,B,ria)ia = ria; B* rp = harr_ptr(w)+wia; @@ -278,10 +308,11 @@ static inline bool inplace_add(B w, B x) { // consumes x if returns true; fails if (reusable(w)) { u64 wsz = mm_size(v(w)); u8 wt = v(w)->type; - if (wt==t_i8arr && TYARR_SZ(I8 ,ria)ia=ria; i8arr_ptr (w)[wia]=o2iu(x); return true; } + if (wt==t_bitarr && BITARR_SZ( ria)ia=ria; bitp_set(bitarr_ptr(w),wia,o2bu(x)); return true; } + if (wt==t_i8arr && TYARR_SZ(I8, ria)ia=ria; i8arr_ptr (w)[wia]=o2iu(x); return true; } if (wt==t_i16arr && TYARR_SZ(I16,ria)ia=ria; i16arr_ptr(w)[wia]=o2iu(x); return true; } if (wt==t_i32arr && TYARR_SZ(I32,ria)ia=ria; i32arr_ptr(w)[wia]=o2iu(x); return true; } - if (wt==t_c8arr && TYARR_SZ(C8 ,ria)ia=ria; c8arr_ptr (w)[wia]=o2cu(x); return true; } + if (wt==t_c8arr && TYARR_SZ(C8, ria)ia=ria; c8arr_ptr (w)[wia]=o2cu(x); return true; } if (wt==t_c16arr && TYARR_SZ(C16,ria)ia=ria; c16arr_ptr(w)[wia]=o2cu(x); return true; } if (wt==t_c32arr && TYARR_SZ(C32,ria)ia=ria; c32arr_ptr(w)[wia]=o2cu(x); return true; } if (wt==t_f64arr && TYARR_SZ(F64,ria)ia=ria; f64arr_ptr(w)[wia]=o2fu(x); return true; } diff --git a/src/vm.c b/src/vm.c index a8ed52f7..2f7542b4 100644 --- a/src/vm.c +++ b/src/vm.c @@ -243,11 +243,11 @@ Block* compileBlock(B block, Comp* comp, bool* bDone, u32* bc, usz bcIA, B allBl TSADD(newBC, isVal(obj)? ADDI : ADDU); A64(obj.u); break; - case RETN: if(h!=1) thrM("VM compiler: Wrong stack size before RETN"); + case RETN: if(h!=1) thrM("VM compiler: RETN expected to be called with one item on the stack"); TSADD(newBC, RETN); ret = true; break; - case RETD: if(h!=1&h!=0) thrM("VM compiler: Wrong stack size before RETD"); + case RETD: if(h!=1&h!=0) thrM("VM compiler: RETD expected to be called with no more than 1 item on the stack"); if (h==1) TSADD(newBC, POPS); TSADD(newBC, RETD); ret = true; @@ -283,6 +283,7 @@ Block* compileBlock(B block, Comp* comp, bool* bDone, u32* bc, usz bcIA, B allBl break; } case SETH: case PRED: + if (*c==PRED && h!=1) thrM("VM compiler: PRED expected to be called with one item on the stack"); if (mpsc<1) mpsc=1; // SETH and PRED may want to have a parent scope pointer TSADD(newBC, *c==SETH? SETHi : imm? PRED1 : PRED2); TSADD(bodyReqs, ((NextRequest){.off = TSSIZE(newBC), .pos1 = pos1, .pos2 = imm? U32_MAX : pos2}));
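Notes on the techniques introduced by this patch (illustrative sketches, not project code):

The core of the patch is the packed boolean layout added in src/utils/bits.h: 64 booleans per u64 word, BIT_N for the word count, bitp_set/bitp_get for single bits, and bit_sum for a popcount-based total with the trailing partial word masked off. The standalone sketch below mirrors those helpers using plain stdint types and the GCC/Clang popcount builtin in place of the project's POPC macro; treat it as an illustration of the layout rather than the actual source.

#include <stdint.h>
#include <stdio.h>
typedef uint64_t u64;

#define BIT_N(IA) (((IA)+63) >> 6)               /* u64 words needed to hold IA bits */

static void bitp_set(u64* arr, u64 n, int v) {   /* write bit n */
  u64 m = (u64)1 << (n & 63);
  if (v) arr[n>>6] |=  m;
  else   arr[n>>6] &= ~m;
}
static int bitp_get(const u64* arr, u64 n) {     /* read bit n */
  return (arr[n>>6] >> (n & 63)) & 1;
}
static long long bit_sum(const u64* x, u64 am) { /* number of set bits among the first am */
  long long r = 0;
  for (u64 i = 0; i < (am>>6); i++) r += __builtin_popcountll(x[i]);
  if (am & 63) r += __builtin_popcountll(x[am>>6] << (64 - (am & 63)));
  return r;
}

int main(void) {
  u64 bits[BIT_N(100)] = {0};
  for (int i = 0; i < 100; i += 3) bitp_set(bits, i, 1);  /* set every third bit */
  printf("%d %d %lld\n", bitp_get(bits,0), bitp_get(bits,1), bit_sum(bits,100)); /* prints 1 0 34 */
  return 0;
}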
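bit_sel in the new src/utils/bits.c is the workhorse for expanding a bit vector back out: it replaces each 0 with e0 and each 1 with e1, picking the narrowest result element type that fits both (i8/i16/i32/f64, c8/c16/c32, or boxed). Both the boolean-𝕨 fast path of ⊏ and the scalar-with-bit-array cases in arithd.c lean on it. Below is a hypothetical reduced form for two int8_t replacement values only, showing the shape of the loop without the type dispatch or reference counting.

#include <stdint.h>
#include <stddef.h>

static int bitp_get(const uint64_t* arr, uint64_t n) { return (arr[n>>6] >> (n & 63)) & 1; }

/* r[i] = bits[i] ? e1 : e0, for ia packed booleans */
static void bit_sel_i8(const uint64_t* bits, size_t ia, int8_t e0, int8_t e1, int8_t* r) {
  for (size_t i = 0; i < ia; i++) r[i] = bitp_get(bits, i) ? e1 : e0;
}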
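The comparison fast paths in cmp.c now produce bit arrays, and when both arguments are already bit arrays each comparison collapses to one bitwise expression per 64-bit word via the new BX macro argument: ≤ is ~w|x, ≥ is w|~x, < is ~w&x, > is w&~x, = is ~(w^x), ≠ is w^x. A sketch of the ≤ case, assuming both inputs hold the same ia bits; bits past the length are don't-care values.

#include <stdint.h>
#include <stddef.h>

#define BIT_N(IA) (((IA)+63) >> 6)

/* rp[i] = (wp[i] <= xp[i]) elementwise over ia packed booleans */
static void le_bitbit(const uint64_t* wp, const uint64_t* xp, uint64_t* rp, size_t ia) {
  for (size_t i = 0; i < BIT_N(ia); i++) rp[i] = ~wp[i] | xp[i];
}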
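mut.h gains bit_cpy for copying a run of bits between packed buffers at arbitrary bit offsets; the patch's version advances bit-by-bit to a destination word boundary and then, for copies longer than 128 bits, moves whole 64-bit words (combining two source words with a shift and or when the offsets are misaligned) before finishing the tail per bit. The version below is the trivially correct bit-at-a-time equivalent, assuming the bitp_set/bitp_get helpers sketched above; it is useful as a mental model or as a test oracle for the word-wise routine.

/* copy l bits from x starting at bit xs into r starting at bit rs */
static void bit_cpy_ref(uint64_t* r, size_t rs, const uint64_t* x, size_t xs, size_t l) {
  for (size_t i = 0; i < l; i++) bitp_set(r, rs + i, bitp_get(x, xs + i));
}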