diff --git a/src/builtins/sfns.c b/src/builtins/sfns.c
index 43d946ea..0a8109a6 100644
--- a/src/builtins/sfns.c
+++ b/src/builtins/sfns.c
@@ -1293,6 +1293,7 @@ B shifta_c2(B t, B w, B x) {
 }
 
 extern B ne_c2(B, B, B);
+extern B select_c2(B, B, B);
 extern B rt_group;
 B group_c2(B t, B w, B x) {
   if (isArr(w)&isArr(x) && RNK(w)==1 && RNK(x)==1 && depth(w)==1) {
@@ -1304,7 +1305,7 @@ B group_c2(B t, B w, B x) {
       if (we==el_bit) w = taga(cpyI8Arr(w));
       i64 ria = 0;
       bool bad = false, sort = true;
-      usz neg = 0;
+      usz neg = 0, change = 0;
       void *wp0 = tyany_ptr(w);
       #define CASE(T) case el_##T: { \
         T max = -1, prev = -1; \
@@ -1314,6 +1315,7 @@ B group_c2(B t, B w, B x) {
           bad |= n < -1; \
           neg += n == -1; \
           sort &= prev <= n; \
+          change += prev != n; \
           prev = n; \
         } \
         if (wia>xia) { ria=((T*)wp0)[xia]; bad|=ria<-1; } \
@@ -1323,6 +1325,65 @@ B group_c2(B t, B w, B x) {
       #undef CASE
       if (bad) thrM("⊔: 𝕨 can't contain elements less than ¯1");
       if (ria > (i64)(USZ_MAX)) thrOOM();
+
+      Arr* r = arr_shVec(m_fillarrp(ria)); fillarr_setFill(r, m_f64(0));
+      B* rp = fillarr_ptr(r);
+      for (usz i = 0; i < ria; i++) rp[i] = m_f64(0); // don't break if allocation errors
+      B xf = getFillQ(x);
+
+      Arr* rf = arr_shVec(m_fillarrp(0)); fillarr_setFill(rf, m_f64(0));
+      B z = taga(rf);
+      fillarr_setFill(r, z);
+
+      TALLOC(i32, pos, 2*ria+1); i32* len = pos+ria+1;
+      // Both cases needed to make sure wia>0 for ip[wia-1] below
+      if (ria==0) goto intvec_ret;
+      if (neg==xia) {
+        for (usz i = 0; i < ria; i++) rp[i] = inc(z);
+        goto intvec_ret;
+      }
+
+      u8 xe = TI(x,elType);
+      u8 width = elWidth(xe);
+      u64 xw;
+      if (xia>64 && (xw=(u64)xia*width)<=I32_MAX && change<…) {
+        #define C1(F,X) F##_c1(t, X)
+        #define C2(F,W,X) F##_c2(t, W, X)
+        if (wia>xia) w = C2(take, m_f64(xia), w);
+        B c = C2(ne, C2(drop, m_f64(-1), inc(w)),
+                     C2(drop, m_f64( 1), inc(w)));
+        B ind = C1(slash, C2(join, m_f64(-1!=IGetU(w,0).f), c));
+        w = C2(select, inc(ind), w);
+        #undef C1
+        #undef C2
+        if (TI(ind,elType)!=el_i32) ind = taga(cpyI32Arr(ind));
+        if (TI(w  ,elType)!=el_i32) w   = taga(cpyI32Arr(w  ));
+        wia = IA(ind);
+
+        i32* ip = i32any_ptr(ind);
+        i32* wp = i32any_ptr(w);
+        usz i0 = ip[0];
+        for (usz i=0; i
[…]
       if (xia>32 && neg>xia/4+xia/8) {
         if (wia>xia) w = take_c2(m_f64(0), m_f64(xia), w);
         B m = ne_c2(m_f64(0), m_f64(-1), inc(w));
@@ -1332,26 +1393,14 @@ B group_c2(B t, B w, B x) {
       }
       if (TI(w,elType)!=el_i32) w = taga(cpyI32Arr(w));
       i32* wp = i32any_ptr(w);
-      TALLOC(i32, lenO, ria+1); i32* len = lenO+1;
-      TALLOC(i32, pos, ria);
       for (usz i = 0; i < ria; i++) len[i] = pos[i] = 0;
       for (usz i = 0; i < xia; i++) len[wp[i]]++; // overallocation makes this safe after n<-1 check
-      Arr* r = arr_shVec(m_fillarrp(ria)); fillarr_setFill(r, m_f64(0));
-      B* rp = fillarr_ptr(r);
-      for (usz i = 0; i < ria; i++) rp[i] = m_f64(0); // don't break if allocation errors
-      B xf = getFillQ(x);
-
-      Arr* rf = arr_shVec(m_fillarrp(0)); fillarr_setFill(rf, m_f64(0));
-      fillarr_setFill(r, taga(rf));
-      u8 xe = TI(x,elType);
       switch (xe) { default: UD;
         case el_i8: case el_c8: case el_i16: case el_c16: case el_i32: case el_c32: case el_f64: {
-          u8 width = elWidth(xe);
           void* xp = tyany_ptr(x);
-          B z = taga(rf);
           u8 xt = el2t(xe);
           if (sort) {
             for (usz j=0, i=neg*width; j
[…]
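
The core of the patch is the scan's new `change` counter (the number of positions where 𝕨 differs from its predecessor) and the fast path gated on it: when 𝕩 is long but `change` is small, 𝕨 consists of long runs of equal indices, so each run maps one contiguous slice of 𝕩 into one result group, and the work is proportional to the number of runs rather than to ≠𝕩. The patch finds run starts with whole-array primitives (shift 𝕨 against itself, `≠` the two, `/` the resulting mask, then `⊏` to fetch each run's index). Below is a minimal standalone sketch of that idea in plain C, not CBQN's code; `run_t`, `find_runs`, and the printing loop are illustrative names only.

/*
 * Sketch: group by runs instead of by elements, assuming plain C arrays.
 * w[i] is the group index for x[i]; -1 means "drop this element".
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t start, len; int group; } run_t;

// Collect maximal runs of equal values in w (length n, entries >= -1).
// Writes at most n runs into out; returns how many were written.
static size_t find_runs(const int* w, size_t n, run_t* out) {
  size_t nr = 0;
  for (size_t i = 0; i < n; ) {
    size_t j = i+1;
    while (j < n && w[j] == w[i]) j++; // extend run while the value repeats
    out[nr++] = (run_t){ .start = i, .len = j-i, .group = w[i] };
    i = j;
  }
  return nr;
}

int main(void) {
  int    w[] = { 0,0,0, 1,1, -1,-1, 2,2,2,2 };
  double x[] = { 1,2,3, 4,5,  6, 7, 8,9,10,11 };
  size_t n = sizeof w / sizeof *w;

  run_t* runs = malloc(n * sizeof *runs);
  size_t nr = find_runs(w, n, runs);

  // Each run contributes one contiguous slice of x to its group; a real
  // implementation would memcpy (or alias) the slice instead of printing.
  for (size_t r = 0; r < nr; r++) {
    if (runs[r].group < 0) continue; // runs of -1 are dropped wholesale
    printf("group %d gets x[%zu..%zu]:", runs[r].group,
           runs[r].start, runs[r].start + runs[r].len - 1);
    for (size_t k = 0; k < runs[r].len; k++)
      printf(" %g", x[runs[r].start + k]);
    printf("\n");
  }
  free(runs);
  return 0;
}

In this sketch each group happens to own a single run; the real kernel first accumulates per-group lengths across runs (the `len` array) and only then copies, and the `change<` threshold (lost in the garbled span above) ensures run bookkeeping replaces the element-wise loop only when runs are long enough to pay for it.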
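
One smaller trick worth noting: `TALLOC(i32, pos, 2*ria+1); i32* len = pos+ria+1;` fuses the old separate `lenO` and `pos` buffers into one allocation while preserving what `i32* len = lenO+1;` provided before, namely that `len[-1]` is a real, writable slot. Because the scan has already rejected values below ¯1, the counting loop `len[wp[i]]++` can run branch-free; entries of ¯1 (dropped elements) just bump the scratch slot, which is what the retained comment "overallocation makes this safe after n<-1 check" refers to. A sketch of the layout, assuming ordinary `calloc` in place of `TALLOC`:

/*
 * Sketch of the fused pos/len buffer: len is offset so len[-1] aliases
 * a spare slot, letting the count loop skip the w[i] == -1 branch.
 * Hypothetical standalone demo, not CBQN's API.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  int w[] = { 0, 2, -1, 2, 1, -1, 0, 2 }; // group indices, -1 = dropped
  size_t n = sizeof w / sizeof *w;
  size_t ria = 3;                         // number of result groups

  int* pos = calloc(2*ria + 1, sizeof *pos);
  int* len = pos + ria + 1;  // len[-1] aliases pos[ria]: writable scratch

  for (size_t i = 0; i < n; i++)  // no branch on w[i] == -1
    len[w[i]]++;

  for (size_t g = 0; g < ria; g++)
    printf("len[%zu] = %d\n", g, len[g]);
  printf("dropped (scratch len[-1]) = %d\n", len[-1]);

  free(pos);
  return 0;
}

This is only safe because the earlier `bad` check guarantees no index below ¯1 ever reaches the counting loop, so ¯1 is the single out-of-range value and it has somewhere harmless to go.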