singeli copy

dzaima 2022-04-25 02:59:40 +03:00
parent 37da98d871
commit ab868a55f3
19 changed files with 419 additions and 170 deletions


@@ -137,10 +137,10 @@ preSingeliBin:
git submodule update --init; \
fi
@echo "pre-singeli build:"
@${MAKE} singeli=0 postmsg="singeli sources:" t=presingeli f='-O1' OUTPUT=obj/presingeli/BQN c
@${MAKE} singeli=0 postmsg="singeli sources:" t=presingeli f='-O1 -DPRE_SINGELI' OUTPUT=obj/presingeli/BQN c
gen-singeli: ${addprefix src/singeli/gen/, cmp.c dyarith.c slash.c equal.c scan.c expand.c}
gen-singeli: ${addprefix src/singeli/gen/, cmp.c dyarith.c copy.c equal.c scan.c slash.c}
@echo $(postmsg)
src/singeli/gen/%.c: src/singeli/src/%.singeli preSingeliBin
@echo $< | cut -c 17- | sed 's/^/ /'


@@ -33,6 +33,13 @@ static B* arr_bptr(B x) { assert(isArr(x));
if (v(x)->type==t_fillslice) return c(FillSlice,x)->a;
return NULL;
}
static B* arrV_bptr(Arr* x) {
if (x->type==t_harr) return ((HArr*)x)->a;
if (x->type==t_fillarr) return fillarr_ptr(x);
if (x->type==t_hslice) return ((HSlice*)x)->a;
if (x->type==t_fillslice) return ((FillSlice*)x)->a;
return NULL;
}
static void* tyany_ptr(B x) {
u8 t = v(x)->type;
return IS_SLICE(t)? c(TySlice,x)->a : c(TyArr,x)->a;


@@ -59,27 +59,6 @@ B toKCells(B x, ur k) {
return HARR_O(r).b;
}
HArr* cpyHArr(B x) {
usz ia = a(x)->ia;
HArr_p r = m_harrUc(x);
u8 xe = TI(x,elType);
if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(bitp_get(xp, i)); }
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_c8 ) { u8* xp = c8any_ptr (x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else if (xe==el_c16) { u16* xp = c16any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else if (xe==el_c32) { u32* xp = c32any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else {
B* xp = arr_bptr(x);
if (xp!=NULL) { for (usz i=0; i<ia; i++) r.a[i] = inc(xp[i]); }
else { SGet(x) for (usz i=0; i<ia; i++) r.a[i] = Get(x, i); }
}
dec(x);
return r.c;
}
NOINLINE B m_caB(usz ia, B* a) {
HArr_p r = m_harrUv(ia);
for (usz i = 0; i < ia; i++) r.a[i] = a[i];


@@ -459,7 +459,7 @@ NOINLINE bool equal(B w, B x) { // doesn't consume
u8 xe = TI(x,elType);
#if SINGELI
if (we<=el_c32 && xe<=el_c32) {
if (we<=el_c32 && xe<=el_c32) { // remove & pass a(w) and a(x) to fn so it can do basic loop
u8* wp = tyany_ptr(w);
u8* xp = tyany_ptr(x);
u64 idx = we*8 + xe;


@@ -36,87 +36,6 @@ NOINLINE B m_str32(u32* s) {
return r;
}
#if SINGELI
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#include "../singeli/gen/expand.c"
#pragma GCC diagnostic pop
#define BIT_ICPY(E) avx2_expand_1_##E(xp, rp, ia);
#else
#define BIT_ICPY(E) for(usz i=0; i<ia; i++) rp[i]=bitp_get(xp,i);
#endif
#define MAKE_ICPY(T,E) T##Arr* cpy##T##Arr(B x) { \
usz ia = a(x)->ia; \
E* rp; Arr* r = m_##E##arrp(&rp, ia); \
arr_shCopy(r, x); \
u8 xe = TI(x,elType); \
if (xe==el_bit) { u64* xp = bitarr_ptr(x); BIT_ICPY(E) } \
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else { \
B* xp = arr_bptr(x); \
if (xp!=NULL) { for (usz i=0; i<ia; i++) rp[i]=o2fu(xp[i] ); } \
else { SGetU(x) for (usz i=0; i<ia; i++) rp[i]=o2fu(GetU(x,i)); } \
} \
decG(x); \
return (T##Arr*)r; \
}
MAKE_ICPY(I8, i8)
MAKE_ICPY(I16, i16)
MAKE_ICPY(I32, i32)
MAKE_ICPY(F64, f64)
#undef MAKE_ICPY
#define MAKE_CCPY(T,E) \
T##Arr* cpy##T##Arr(B x) { \
usz ia = a(x)->ia; \
T##Atom* rp; Arr* r = m_##E##arrp(&rp, ia); \
arr_shCopy(r, x); \
u8 xe = TI(x,elType); \
if (xe==el_c8 ) { u8* xp = c8any_ptr (x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_c16) { u16* xp = c16any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_c32) { u32* xp = c32any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else { \
B* xp = arr_bptr(x); \
if (xp!=NULL) { for (usz i=0; i<ia; i++) rp[i]=o2cu(xp[i] ); } \
else { SGetU(x) for (usz i=0; i<ia; i++) rp[i]=o2cu(GetU(x,i)); } \
} \
decG(x); \
return (T##Arr*)r; \
}
MAKE_CCPY(C8, c8)
MAKE_CCPY(C16, c16)
MAKE_CCPY(C32, c32)
#undef MAKE_CCPY
BitArr* cpyBitArr(B x) {
usz ia = a(x)->ia;
u64* rp; Arr* r = m_bitarrp(&rp, ia);
arr_shCopy(r, x);
u8 xe = TI(x,elType);
if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; i<BIT_N(ia); i++) rp[i] = xp[i]; }
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else {
B* xp = arr_bptr(x);
if (xp!=NULL) { for (usz i=0; i<ia; i++) bitp_set(rp,i,o2fu(xp[i] )); }
else { SGetU(x) for (usz i=0; i<ia; i++) bitp_set(rp,i,o2fu(GetU(x,i))); }
}
decG(x);
return (BitArr*)r;
}
static Arr* bitarr_slice(B x, usz s, usz ia) {
u64* rp; Arr* r = m_bitarrp(&rp, ia);
bit_cpy(rp, 0, bitarr_ptr(x), s, ia);


@@ -22,6 +22,8 @@ def v2i{x:T & w256{T}} = [32]u8 ~~ x # for compact casting for the annoying intr
def v2f{x:T & w256{T}} = [8]f32 ~~ x
def v2d{x:T & w256{T}} = [4]f64 ~~ x
def undefPromote{T, x:X & w128{X} & w256{T}} = T~~emit{[32]u8, '_mm256_castsi128_si256', v2i{x}}
def undefPromote{T, x:X & width{T}==width{X}} = T~~x
# load & store
def load {a:T, n & w256i{eltype{T} }} = emit{eltype{T}, '_mm256_loadu_si256', a+n}
@@ -101,8 +103,12 @@ def floor{a:[4]f64} = emit{[4]f64, '_mm256_floor_pd', a}
def ceil{a:[4]f64} = emit{[4]f64, '_mm256_ceil_pd', a}
# conversion
def cvt{F==i32, T==[4]f64, a:A & w128i{A}} = emit{T, '_mm256_cvtepi32_pd', a}
def half{x:T, i & w256{T} & knum{i}} = [vcount{T}/2](eltype{T}) ~~ emit{[8]i16, '_mm256_extracti128_si256', v2i{x}, i}
def pair{a:T,b:T & width{T}==128} = [vcount{T}*2](eltype{T}) ~~ emit{[8]i32, '_mm256_setr_m128i', a, b}
def pair{x} = pair{tupsel{0,x},tupsel{1,x}}
def cvt{F==i32, T==[4]f64, a:A & w128i{A}} = emit{T, '_mm256_cvtepi32_pd', a}
def cvt{F, T, a:A & w256{A} & width{F}<width{eltype{T}}} = cvt{F, T, half{a, 0}}
# structural operations
def extract{x:T, i & w256i{T,32} & knum{i}} = emit{eltype{T}, '_mm256_extract_epi32', x, i}
@@ -122,11 +128,6 @@ def insert{x:T, i, v & w256i{T,64} & knum{i}} = emit{T, '_mm256_insert_epi64', x
def blend{f:T, t:T, m:M & w256{T} & w256i{M,32}} = T ~~ emit{[8]f32, '_mm256_blendv_ps', v2f{f}, v2f{t}, v2f{m}}
def blend{f:T, t:T, m:M & w256{T} & w256i{M,64}} = T ~~ emit{[4]f64, '_mm256_blendv_pd', v2d{f}, v2d{t}, v2d{m}}
# mixed-width operations
def half{x:T, i & w256{T} & knum{i}} = [vcount{T}/2](eltype{T}) ~~ emit{[8]i16, '_mm256_extracti128_si256', v2i{x}, i}
def pair{a:T,b:T & width{T}==128} = [vcount{T}*2](eltype{T}) ~~ emit{[8]i32, '_mm256_setr_m128i', a, b}
def pair{x} = pair{tupsel{0,x},tupsel{1,x}}
# mask stuff
def getmask{x:T & w256{T, 32}} = emit{u8, '_mm256_movemask_ps', v2f{x}}
def getmask{x:T & w256{T, 64}} = emit{u8, '_mm256_movemask_pd', v2d{x}}


@@ -100,8 +100,8 @@ def __subs{a:T,b:T & T==[32]u8 } = emit{T, '_mm256_subs_epu8', a, b}
# structural operations
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 32} & w256i{M, 32}} = emit{void, '_mm256_maskstore_epi32', *i32 ~~ (a+n), m, v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 64} & w256i{M, 64}} = emit{void, '_mm256_maskstore_pd', *f64 ~~ (a+n), m, v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 32} & w256i{M, 32}} = emit{void, '_mm256_maskstore_epi32', *i32 ~~ (a+n), m, [8]i32~~v}
def maskstore{a:T, m:M, n, v & w256{eltype{T}, 64} & w256i{M, 64}} = emit{void, '_mm256_maskstore_pd', *f64 ~~ (a+n), m, [4]f64~~v}
# maskstore with all cases defined, at the cost of not being a single instruction
def maskstoreF{p, m, n, x:T} = store{p, n, blendF{load{p,n}, x, m}}
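In C terms, the fallback above is a plain read-modify-write blend (a minimal sketch, assuming AVX2):

```
#include <immintrin.h>

// Read-modify-write masked store: load the destination, blend in the new
// lanes where the mask is set, write everything back. Unlike vpmaskmovd this
// touches all 32 bytes, trading the single-instruction store for coverage of
// every element width.
static void maskstore_fallback(__m256i* p, __m256i m, __m256i x) {
  _mm256_storeu_si256(p, _mm256_blendv_epi8(_mm256_loadu_si256(p), x, m));
}
```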
@@ -145,24 +145,41 @@ def anyneg{x:T & w256s{T, 16}} = getmask{[32]u8 ~~ ([16]i16~~x < broadcast{[16]i
# conversion
# convert packed elements of type F to a result T
# TODO F param is unnecessary if correct element type of A is enforced
def cvt{F==u8, T==[16]u16, a:A & w128i{A}} = emit{T, '_mm256_cvtepu8_epi16', a}
def cvt{F==u8, T==[8]u32, a:A & w128i{A}} = emit{T, '_mm256_cvtepu8_epi32', a}
def cvt{F==u8, T==[4]u64, a:A & w128i{A}} = emit{T, '_mm256_cvtepu8_epi64', a}
def cvt{F==u16, T==[8]u32, a:A & w128i{A}} = emit{T, '_mm256_cvtepu16_epi32', a}
def cvt{F==u16, T==[4]u64, a:A & w128i{A}} = emit{T, '_mm256_cvtepu16_epi64', a}
def cvt{F==u32, T==[4]u64, a:A & w128i{A}} = emit{T, '_mm256_cvtepu32_epi64', a}
def cvt{F==i8, T==[16]i16, a:A & w128i{A}} = emit{T, '_mm256_cvtepi8_epi16', a}
def cvt{F==i8, T==[8]i32, a:A & w128i{A}} = emit{T, '_mm256_cvtepi8_epi32', a}
def cvt{F==i8, T==[4]i64, a:A & w128i{A}} = emit{T, '_mm256_cvtepi8_epi64', a}
def cvt{F==i16, T==[8]i32, a:A & w128i{A}} = emit{T, '_mm256_cvtepi16_epi32', a}
def cvt{F==i16, T==[4]i64, a:A & w128i{A}} = emit{T, '_mm256_cvtepi16_epi64', a}
def cvt{F==i32, T==[4]i64, a:A & w128i{A}} = emit{T, '_mm256_cvtepi32_epi64', a}
def cvt{F , T==[4]f64, a:A & w128i{A} & F!=i32} = cvt{i32, T, cvt{F, [4]i32, a}}
def cvt{F, T, a:A & eltype{T}==F} = T~~a
# undo cvt; assumes elements of X fit in T, otherwise may be saturated; but keeps the width of x, with trailing elements undefined
# def cvt0{F, T, a} = { show{'cvt invocation', F, T, a}; 0 }
# def cvt{F, T, a & cvt0{F, T, a}} = 123
# undo cvt; assumes elements of X fit in T, otherwise may be saturated or otherwise corrupted; but keeps the width of x, with trailing elements undefined
def ucvt{T, x:X & w256i{X,32} & width{T}==8} = {
a:= packQ{x, x}
b:= packQ{a, a}
to_el{T, sel{[8]u32, b, make{[8]i32, 0,4,0,4,0,4,0,4}}}
}
def ucvt{T, x:X & w256i{X,32} & width{T}==16} = {
to_el{T, shuf{[4]u64, packQ{x, x}, 4b3120}}
}
def ucvt{T, x:X & w256{X} & width{T}==width{eltype{X}}} = to_el{T, x}
def ucvt{T, x:X & w256i{X,32} & width{T}==16} = to_el{T, shuf{[4]u64, packQ{x, x}, 4b3120}}
def ucvt{T, x:X & w256i{X,16} & width{T}== 8} = to_el{T, shuf{[4]u64, packQ{x, x}, 4b3120}}
def ucvt{T, x:X & w256f{X,64} & T<i32} = ucvt{T, ucvt{i32, x}}
def ucvt{T, x:X & w256f{X,64} & T==i32} = to_el{T, undefPromote{[8]i32, emit{[4]i32, '_mm256_cvtpd_epi32', x}}}
def ucvt{T, x:X & w256u{X,64} & T==u32} = to_el{T, sel{[8]i32, x, make{[8]i32, 2*iota{8}}}}
def ucvt{T, x:X & w256u{X,64} & T==u16} = to_el{T, sel{[16]i8, ucvt{u32,x}, make{[32]i8, (iota{32}>>1<<2) | (iota{32}&1)}}}
def ucvt{T, x:X & w256u{X,64} & T== u8} = to_el{T, sel{[16]i8, ucvt{u32,x}, make{[32]i8, 4*iota{32}}}}
def ucvt{T, x:X & w256{X} & width{T}==width{eltype{X}}} = to_el{T, x} # TODO check for not being f64/i64
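The 32→16 case above is the usual pack-then-permute dance; in raw intrinsics it looks roughly like this (a sketch, assuming packQ maps to the per-lane saturating pack):

```
#include <immintrin.h>

// _mm256_packs_epi32 packs within each 128-bit lane, leaving the two input
// halves interleaved; the 4b3120 (0xD8) 64-bit permute restores element
// order, so the low 128 bits hold the narrowed elements in sequence.
static __m256i narrow_i32_to_i16(__m256i x) {
  __m256i p = _mm256_packs_epi32(x, x);                      // per-lane pack
  return _mm256_permute4x64_epi64(p, _MM_SHUFFLE(3,1,2,0));  // fix ordering
}
```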


@@ -16,8 +16,14 @@ def tail{n,x} = x - (x>>n << n) # get the n least significant bits
def bit {k,x} = tail{1,x>>k} << k # get the k-th bit
def rare{x:u1} = emit{u1, '__builtin_expect', x, 0}
def assert{x & x==0} = assert{'failed assertion'}
def assert{x & x==1} = 1
def assert{x:u1} = emit{void, 'si_assert', x}
def min{a, b & knum{a} & knum{b}} = tern{a<b, a, b}
def max{a, b & knum{a} & knum{b}} = tern{a>b, a, b}
# various checks
def knum{x} = match{kind{x},'number'}
@@ -101,7 +107,7 @@ def broadcast{n, v & knum{n}} = @collect(n) v
# debug stuff
def printfType{T} = tern{isfloat{T}, '%.17g', merge{'%', tern{width{T}==64, 'l', ''}, tern{issigned{T}, 'd', 'u'}}}
def print{x & match{kind{x},'symbol'}} = { emit{void, 'printf', '"%s"', merge{'"', x, '"'}}; x }
def print{x & anyNum{x}} = { emit{void, 'printf', tern{issigned{x}, '"%d"', '"%u"'}, x}; x }
def print{x:T & anyNum{x} & width{T}==64} = { emit{void, 'printf', tern{issigned{x}, '"%ld"', '"%lu"'}, x}; x }
def print{x:T & anyNum{x}} = { emit{void, 'printf', merge{'"', printfType{T}, '"'}, x}; x }
def println{x} = { print{x}; print{'\n'}; x }


@@ -9,6 +9,18 @@ def q_chr{T,x & T==u8 } = bcall{u1, 'q_c8', x}
def q_chr{T,x & T==u16} = bcall{u1, 'q_c16', x}
def q_chr{T,x & T==u32} = bcall{u1, 'q_c32', x}
def cbqn_c32Tag{} = emit{u64, '', 'C32_TAG'}
def cbqn_tagTag{} = emit{u64, '', 'TAG_TAG'}
def cbqn_varTag{} = emit{u64, '', 'VAR_TAG'}
def cbqn_extTag{} = emit{u64, '', 'EXT_TAG'}
def cbqn_rawTag{} = emit{u64, '', 'RAW_TAG'}
def cbqn_md1Tag{} = emit{u64, '', 'MD1_TAG'}
def cbqn_md2Tag{} = emit{u64, '', 'MD2_TAG'}
def cbqn_funTag{} = emit{u64, '', 'FUN_TAG'}
def cbqn_nspTag{} = emit{u64, '', 'NSP_TAG'}
def cbqn_objTag{} = emit{u64, '', 'OBJ_TAG'}
def cbqn_arrTag{} = emit{u64, '', 'ARR_TAG'}
def cbqn_elType{T & T==u1 } = 0
def cbqn_elType{T & T==i8 } = 1
def cbqn_elType{T & T==i16} = 2


@@ -0,0 +1,136 @@
include './base'
include './sse3'
include './avx'
include './avx2'
include './mask'
include './cbqnDefs'
include './bitops'
def spreadBits{T==[32]u8, a:u32} = {
def idxs = iota{32}
b:= broadcast{[8]u32, a}
c:= [32]u8~~b
d:= sel{[16]u8, c, make{[32]i8, idxs>>3 + bit{4, idxs}}}
e:= make{[32]u8, 1<<tail{3, idxs}}
e == (d&e)
}
def spreadBits{T, a & vcount{T} <= width{eltype{T}} & w256u{T}} = {
b:= make{T, 1<<iota{vcount{T}}}
b == (b & broadcast{T, a})
}
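The [32]u8 case is the classic mask-to-bytes expansion. A standalone C rendering of the same steps (a sketch, assuming AVX2):

```
#include <immintrin.h>
#include <stdint.h>

// Expand a 32-bit mask to 32 byte lanes of 0x00/0xFF: broadcast the mask,
// shuffle so byte i holds source byte i>>3 (vpshufb works per 128-bit lane,
// which is what the original's bit{4,idxs} offset accounts for), then test
// the single bit 1<<(i&7) in each lane.
static __m256i spread_bits_32(uint32_t a) {
  __m256i b = _mm256_set1_epi32((int32_t)a);
  __m256i d = _mm256_shuffle_epi8(b, _mm256_setr_epi8(
    0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,
    2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3));
  __m256i e = _mm256_setr_epi8(
    1,2,4,8,16,32,64,-128, 1,2,4,8,16,32,64,-128,
    1,2,4,8,16,32,64,-128, 1,2,4,8,16,32,64,-128);
  return _mm256_cmpeq_epi8(_mm256_and_si256(d, e), e);
}
```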
def copyFromBits{T, xp: *u64, rp: *eltype{T}, l:u64} = {
def bulk = vcount{T}
def TU = ty_u{T}
maskedLoop{bulk, l, {i, M} => {
x:= b_getBatch{bulk, xp, i} # TODO unroll f64 by two to make b_getBatch not sad
y:= spreadBits{TU, x}
r:= y & TU~~broadcast{T, 1}
storeBatch{rp, i, T~~r, M}
}}
}
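copyFromBits then masks those 0x00/0xFF lanes down to 0/1 values of the result type. As a scalar reference, the i32 instantiation computes:

```
#include <stdint.h>

// Scalar model of copyFromBits for i32 results: element i of the output is
// bit i of the input bitmap, widened to 0 or 1.
static void bits_to_i32_ref(const uint64_t* xp, int32_t* rp, uint64_t l) {
  for (uint64_t i = 0; i < l; i++)
    rp[i] = (int32_t)((xp[i >> 6] >> (i & 63)) & 1);
}
```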
copy{vw, X, R}(x: *u8, r: *u8, l:u64, xRaw: *u8) : void = {
assert{l!=0}
def bulk = vw/max{width{X}, width{R}}
xp:= *tern{X==u1, u64, X} ~~ x
rp:= *tern{R==u1, u64, R} ~~ r
def XV = [bulk]X
def RV = [bulk]R
if (R==u64) {
# show{'R==u64', X, R}
assert{((X==u8) | (X==u16)) | (X==u32)}
maskedLoop{bulk, l, {i, M} => { # TODO could maybe read 256 bits and use unpack to write >256
v:= loadBatch{xp, i, RV}
v|= broadcast{RV, cbqn_c32Tag{} << 48}
storeBatch{rp, i, v, M}
}}
} else if (X==u1 and R==u1) {
# show{'u1u1', X, R}
def V64 = [vw/64]u64
maskedLoop{vcount{V64}, cdiv{l, 64}, {i, M} => {
v:= loadBatch{xp, i, V64}
storeBatch{rp, i, v, M}
}}
} else if (X==u1) {
# show{'X==u1', X, R}
copyFromBits{[bulk]R, *u64~~x, *R~~r, l}
} else if (R==u1) {
# show{'R==u1', X, R}
def XU = ty_u{XV}
maskedLoop{bulk, l, {i, M} => {
v:= loadBatch{xp, i, XV}
r:= getmask{(XU~~v) == XU~~broadcast{XV,1}}
b_setBatch{vcount{XV}, rp, i, r} # TODO something more special for f64
}}
} else if (width{X}<=width{R}) {
# show{'w{X}<=w{R}', X, R}
maskedLoop{bulk, l, {i, M} => {
v:= loadBatch{xp, i, RV}
storeBatch{rp, i, v, M}
}}
} else {
# show{'w{X}>w{R}', X, R}
maskedLoop{bulk, l, {i, M} => {
v:= loadBatch{xp, i, XV}
storeBatch{rp, i, v, M}
}}
}
}
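The R==u64 branch above manufactures boxed character values by OR-ing the character tag into the high bits of the widened code point. A scalar sketch of that one step (CBQN defines the actual C32_TAG value; the layout here follows its NaN-boxing scheme):

```
#include <stdint.h>

// Form a boxed B value from a char code by placing the tag in the top bits,
// mirroring `v |= broadcast{RV, cbqn_c32Tag{} << 48}` above.
static inline uint64_t tag_c32(uint64_t c32_tag, uint32_t code) {
  return (c32_tag << 48) | code;
}
```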
# avx2_copy_src_dst
# x→int & equal-width copies
'avx2_copy_1_1' = copy{256, u1, u1}
'avx2_copy_1_i8' = copy{256, u1, i8}
'avx2_copy_1_i16' = copy{256, u1, i16}
'avx2_copy_1_i32' = copy{256, u1, i32}
'avx2_copy_i8_1' = copy{256, i8, u1}
'avx2_copy_i8_i8', 'avx2_copy_c8_c8' = copy{256, i8, i8}
'avx2_copy_i8_i16' = copy{256, i8, i16}
'avx2_copy_i8_i32' = copy{256, i8, i32}
'avx2_copy_i16_1' = copy{256, i16, u1}
'avx2_copy_i16_i8' = copy{256, i16, i8}
'avx2_copy_i16_i16', 'avx2_copy_c16_c16' = copy{256, i16, i16}
'avx2_copy_i16_i32' = copy{256, i16, i32}
'avx2_copy_i32_1' = copy{256, i32, u1}
'avx2_copy_i32_i8' = copy{256, i32, i8}
'avx2_copy_i32_i16' = copy{256, i32, i16}
'avx2_copy_i32_i32', 'avx2_copy_c32_c32'= copy{256, i32, i32}
'avx2_copy_f64_1' = copy{256, f64, u1}
'avx2_copy_f64_i8' = copy{256, f64, i8}
'avx2_copy_f64_i16' = copy{256, f64, i16}
'avx2_copy_f64_i32' = copy{256, f64, i32}
# x→f64, x→B (no avx2_copy_B_B, as that may need refcounting)
'avx2_copy_1_f64', 'avx2_copy_1_B' = copy{256, u1, f64}
'avx2_copy_i8_f64', 'avx2_copy_i8_B' = copy{256, i8, f64}
'avx2_copy_i16_f64', 'avx2_copy_i16_B' = copy{256, i16, f64}
'avx2_copy_i32_f64', 'avx2_copy_i32_B' = copy{256, i32, f64}
'avx2_copy_f64_f64', 'avx2_copy_f64_B' = copy{256, f64, f64}
# chr→x
'avx2_copy_c8_c16' = copy{256, u8, u16}
'avx2_copy_c8_c32' = copy{256, u8, u32}
'avx2_copy_c8_B' = copy{256, u8, u64}
'avx2_copy_c16_c8' = copy{256, u16, u8}
'avx2_copy_c16_c32' = copy{256, u16, u32}
'avx2_copy_c16_B' = copy{256, u16, u64}
'avx2_copy_c32_c8' = copy{256, u32, u8}
'avx2_copy_c32_c16' = copy{256, u32, u16}
'avx2_copy_c32_B' = copy{256, u32, u64}
# B→chr
'avx2_copy_B_c8' = copy{256, u64, u8}
'avx2_copy_B_c16' = copy{256, u64, u16}
'avx2_copy_B_c32' = copy{256, u64, u32}
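Each exported name becomes a plain C function matching the copy_fn typedef seen later in this commit. A hypothetical call site (the declaration is assumed for illustration, not generated output):

```
#include <stdint.h>

// Assumed declaration, shaped like copy_fn: (src, dst, length, raw array ptr).
extern void avx2_copy_i8_i32(uint8_t* x, uint8_t* r, uint64_t l, uint8_t* xRaw);

void widen_example(int8_t* src, int32_t* dst, uint64_t n) {
  // n must be nonzero (the kernel asserts l!=0); xRaw is only consulted by
  // the boxed-array fallback, so NULL here is purely illustrative.
  avx2_copy_i8_i32((uint8_t*)src, (uint8_t*)dst, n, (uint8_t*)0);
}
```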


@@ -55,7 +55,8 @@ equal{W, X}(w:*u8, x:*u8, l:u64, d:u64) : u1 = {
def fac = width{X}/width{W}
maskedLoop{bulk, l, {i, M} => {
cw:= load{*ww{tern{fac==1, 256, 128}, u8} ~~ (w + i*32/fac)}
# TODO update this to modern mask stuff
cw:= load{*ww{tern{fac==1, 256, 128}, W} ~~ (w + i*32/fac)}
cx:= load{*ww{256, X} ~~ x, i}
cwc:= cvt{W, ww{256, X}, cw}
if (anyne{cwc,cx,M}) return{0}


@@ -1,38 +0,0 @@
include './base'
include './sse3'
include './avx'
include './avx2'
include './mask'
include './bitops'
def bitspread{T==[32]u8, a:u32} = {
def idxs = iota{32}
b:= broadcast{[8]u32, a}
c:= [32]u8~~b
d:= sel{[16]u8, c, make{[32]i8, idxs>>3 + bit{4, idxs}}}
e:= make{[32]u8, 1<<tail{3, idxs}}
e == (d&e)
}
def bitspread{T, a & vcount{T} <= width{eltype{T}} & w256u{T}} = {
b:= make{T, 1<<iota{vcount{T}}}
b == (b & broadcast{T, a})
}
bitexpand{T}(xp: *u64, rp:*eltype{T}, l:u64) : void = {
def bulk = vcount{T}
def TU = ty_u{T}
maskedLoop{vcount{T}, l, {i, M} => {
x:= b_getBatch{bulk, xp, i} # TODO unroll f64 by two to make b_getBatch not sad
y:= bitspread{TU, x}
r:= y & TU~~broadcast{T, 1}
storeBatch{rp, i, T~~r, M}
}}
}
'avx2_expand_1_i8' = bitexpand{[32]i8}
'avx2_expand_1_i16' = bitexpand{[16]i16}
'avx2_expand_1_i32' = bitexpand{[ 8]i32}
'avx2_expand_1_f64' = bitexpand{[ 4]f64}


@@ -47,9 +47,22 @@ def maskAfter{n} = {
# store low packed elements of x to P; TODO the w parameter isn't doing much?
def storeLow{ptr:P, w, x:T & width{eltype{P}} == width{eltype{T}}} = store{*T~~ptr, 0, x}
def storeLow{ptr:P, w, x:T & w256{T} & w==64} = store{*u64~~ptr, 0, extract{[4]u64~~x, 0}}
# store low packed elements of x to P
def storeLow{ptr:P, w, x:T & width{T}==w} = store{*T~~ptr, 0, x}
def storeLow{ptr:P, w, x:T & w256{T} & w==128} = store{*[128/width{eltype{T}}](eltype{T})~~ptr, 0, half{x, 0}}
def storeLow{ptr:P, w, x:T & w256{T} & w==64} = emit{void, '_mm_storeu_si64', ptr, v2i{half{x, 0}}}
def storeLow{ptr:P, w, x:T & w256{T} & w==32} = emit{void, '_mm_storeu_si32', ptr, v2i{half{x, 0}}}
def storeLow{ptr:P, w, x:T & w256{T} & w==16} = emit{void, '_mm_storeu_si16', ptr, v2i{half{x, 0}}}
def loadLow{ptr:P, w & w128{eltype{P}} & w==128} = eltype{P} ~~ load{*[16]u8 ~~ ptr}
def loadLow{ptr:P, w & w128{eltype{P}} & w== 64} = eltype{P} ~~ emit{[16]u8, '_mm_loadu_si64', ptr}
def loadLow{ptr:P, w & w128{eltype{P}} & w== 32} = eltype{P} ~~ emit{[16]u8, '_mm_loadu_si32', ptr}
def loadLow{ptr:P, w & w128{eltype{P}} & w== 16} = eltype{P} ~~ emit{[16]u8, '_mm_loadu_si16', ptr}
def loadLow{ptr:P, w & w256{eltype{P}} & w<256} = undefPromote{eltype{P}, loadLow{*[16]u8 ~~ ptr, w}}
def loadLow{ptr:P, w & width{eltype{P}} == w} = load{*eltype{P} ~~ ptr}
def loadLowBatch{T, ptr:P, w, n & eltype{P}==eltype{T}} = loadLow{*T ~~ (ptr + n*(w/width{eltype{P}})), w}
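For instance, the w==64 storeLow case is a single unaligned 8-byte store of the low lane (sketch):

```
#include <immintrin.h>

// storeLow{p, 64, x}: drop to the low 128-bit half (a no-op cast) and store
// just its low 64 bits; the upper bytes of x never reach memory.
static void store_low_64(void* p, __m256i x) {
  _mm_storeu_si64(p, _mm256_castsi256_si128(x));
}
```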
# store vcount{T} items into the n'th batch of ptr elements, compressing the items if needed; masked by M
def storeBatch{ptr:P, n, x:T, M} = {
@@ -66,11 +79,10 @@ def loadBatch{ptr:P, n, T} = {
def rpos = ptr + n*vcount{T}
def E0 = eltype{P}
if (width{eltype{T}} == width{E0}) load{*T ~~ rpos}
else cvt{E0, T, load{*[16]u8 ~~ rpos}}
cvt{E0, T, loadLow{*to_el{E0, T} ~~ rpos, vcount{T}*width{E0}}}
}
def maskedLoop{bulk, l, step} = {
def maskedLoop{bulk, l, step} = { # TODO version assuming l!=0
m:u64 = l/bulk
@for (i to m) step{i, maskNone}


@@ -5,8 +5,6 @@ include './avx'
include './avx2'
include './mask'
def min{a, b & knum{a} & knum{b}} = tern{a<b, a, b}
def minBulk{w, A, B & width{A}< width{B}} = w/width{B}
def minBulk{w, A, B & width{A}>=width{B}} = w/width{A}


@@ -22,7 +22,7 @@ def comp16{w:*u64, X, r:*i16, l:u64} = {
@for(w in reinterpret{*u8,w} over i to cdiv{l,8}) {
def step{w} = {
pc:= popc{w}
store{reinterpret{*u64,r}, 0, pext{promote{u64,X{}}, load{c16lut, w}}}
store{reinterpret{*u64,r}, 0, pext{promote{u64,X{}}, load{c16lut, w}}} # TODO don't use unaligned store to make valgrind/ubsan happy
r+= pc
}
step{w&15}
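The compaction leans on BMI2 pext, which gathers the mask-selected bits of its first operand into the low bits of the result (here the mask comes from the c16lut table). For example:

```
#include <immintrin.h>
#include <stdint.h>

// pext gathers mask-selected bits of src into the low bits of the result;
// e.g. _pext_u64(0xAABBCCDD, 0x0000FF00) == 0xCC. Requires BMI2.
static uint64_t gather_bits(uint64_t src, uint64_t mask) {
  return _pext_u64(src, mask);
}
```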


@@ -85,7 +85,7 @@ def extract{x:T, i & w128i{T,16} & knum{i}} = emit{eltype{T}, '_mm_extract_epi16
# debug stuff
def printGen{x, s, n} = apply{emit, merge{tup{void, 'printf', merge{'"', apply{merge,each{{c}=>{if(c>0) merge{',',s}; else s}, iota{n}}}, '"'}}, each{{c}=>extract{x,c}, iota{n}}}}
def print{x:T & isvec{T}} = printGen{x, merge{'%', tern{width{eltype{T}}==64, 'l', ''}, tern{issigned{x}, 'd', 'u'}}, vcount{T}}
def print{x:T & isvec{T}} = printGen{x, printfType{eltype{T}}, vcount{T}}
@@ -97,6 +97,7 @@ def extract{x:T, i & w128i{T,32} & knum{i}} = emit{eltype{T}, '_mm_extract_epi32
def extract{x:T, i & w128i{T,64} & knum{i}} = emit{eltype{T}, '_mm_extract_epi64', x, i}
# conversion
def cvt{F, T, a:A & eltype{T}==F} = T~~a
def cvt{F==i8, T==[8]i16, a:A & w128i{A}} = emit{T, '_mm_cvtepi8_epi16', a}
def cvt{F==i8, T==[4]i32, a:A & w128i{A}} = emit{T, '_mm_cvtepi8_epi32', a}
def cvt{F==i16, T==[4]i32, a:A & w128i{A}} = emit{T, '_mm_cvtepi16_epi32', a}


@@ -179,6 +179,154 @@ DEF_G(void, copy, B, (Mut* m, usz ms, B x, usz xs, usz l)) {
}
}
#if SINGELI
#include <xmmintrin.h>
#if __GNUC__ && !__clang__ // yay gcc
__m128i _mm_loadu_si32(void* p) {
return (__m128i) _mm_load_ss(p);
}
void _mm_storeu_si32(void* p, __m128i x) {
_mm_store_ss(p, _mm_castsi128_ps(x));
}
#endif
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#include "../singeli/gen/copy.c"
#pragma GCC diagnostic pop
typedef void (*copy_fn)(u8*, u8*, u64, u8*);
static void badCopy(u8* xp, u8* rp, u64 len, u8* xRaw) {
err("Copying wrong array type");
}
#define COPY_FN(X,R) avx2_copy_##X##_##R
#define MAKE_CPY(MAKE, GET, WR, XRP, H2T, T, ...) \
static copy_fn copy##T##Fns[10]; \
static void cpy##T##Arr_B(u8* xp, u8* rp, u64 ia, u8* xRaw) { \
Arr* xa = (Arr*)xRaw; B* bxp = arrV_bptr(xa); \
if (bxp!=NULL && sizeof(B)==sizeof(f64)) { \
H2T; \
} else { AS2B fn = TIv(xa,GET); \
for (usz i=0; i<ia; i++) WR(fn(xa,i)); \
} \
} \
static copy_fn copy##T##Fns[] = __VA_ARGS__; \
T##Arr* cpy##T##Arr(B x) { \
usz ia = a(x)->ia; \
MAKE; arr_shCopy(r, x); \
if (ia>0) { \
copy##T##Fns[TI(x,elType)](tyany_ptr(x), (u8*)(XRP), ia, (u8*)a(x)); \
} \
decG(x); \
return (T##Arr*)r; \
}
#define BIT_PUT(V) bitp_set((u64*)rp, i, o2bu(V))
#define H2T_COPY(T) copy##T##Fns[el_MAX]((u8*)bxp, rp, ia, xRaw)
#define MAKE_TYCPY(T, E, F, ...) MAKE_CPY(T##Atom* rp; Arr* r = m_##E##arrp(&rp, ia), getU, ((T##Atom*)rp)[i] = F, rp, H2T_COPY(T), T, __VA_ARGS__)
#define MAKE_CCPY(T,E) MAKE_TYCPY(T, E, o2cu, {badCopy, badCopy, badCopy, badCopy, badCopy, COPY_FN(c8,E),COPY_FN(c16,E),COPY_FN(c32,E),cpy##T##Arr_B,COPY_FN(B,E)})
#define MAKE_ICPY(T,E) MAKE_TYCPY(T, E, o2fu, {COPY_FN(1,E),COPY_FN(i8,E),COPY_FN(i16,E),COPY_FN(i32,E),COPY_FN(f64,E),badCopy, badCopy, badCopy, cpy##T##Arr_B,COPY_FN(f64,E)})
MAKE_CPY(HArr_p p = m_harrUp(ia); Arr* r = (Arr*)p.c, get, ((B*)rp)[i] =, p.a, for (usz i=0; i<ia; i++) ((B*)rp)[i] = inc(bxp[i]),
H, {COPY_FN(1,B),COPY_FN(i8,B),COPY_FN(i16,B),COPY_FN(i32,B),COPY_FN(f64,B),COPY_FN(c8,B),COPY_FN(c16,B),COPY_FN(c32,B),cpyHArr_B, COPY_FN(f64,B)})
MAKE_CPY(u64* rp; Arr* r = m_bitarrp(&rp, ia), getU, BIT_PUT, rp, H2T_COPY(Bit),
Bit, {COPY_FN(1,1),COPY_FN(i8,1),COPY_FN(i16,1),COPY_FN(i32,1),COPY_FN(f64,1),badCopy, badCopy, badCopy, cpyBitArr_B, COPY_FN(f64,1)})
#else
#define MAKE_ICPY(T,E) T##Arr* cpy##T##Arr(B x) { \
usz ia = a(x)->ia; \
E* rp; Arr* r = m_##E##arrp(&rp, ia); \
arr_shCopy(r, x); \
u8 xe = TI(x,elType); \
if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; i<ia; i++) rp[i]=bitp_get(xp,i); } \
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else { \
B* xp = arr_bptr(x); \
if (xp!=NULL) { for (usz i=0; i<ia; i++) rp[i]=o2fu(xp[i] ); } \
else { SGetU(x) for (usz i=0; i<ia; i++) rp[i]=o2fu(GetU(x,i)); } \
} \
decG(x); \
return (T##Arr*)r; \
}
#define MAKE_CCPY(T,E) \
T##Arr* cpy##T##Arr(B x) { \
usz ia = a(x)->ia; \
T##Atom* rp; Arr* r = m_##E##arrp(&rp, ia); \
arr_shCopy(r, x); \
u8 xe = TI(x,elType); \
if (xe==el_c8 ) { u8* xp = c8any_ptr (x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_c16) { u16* xp = c16any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else if (xe==el_c32) { u32* xp = c32any_ptr(x); for(usz i=0; i<ia; i++) rp[i]=xp[i]; } \
else { \
B* xp = arr_bptr(x); \
if (xp!=NULL) { for (usz i=0; i<ia; i++) rp[i]=o2cu(xp[i] ); } \
else { SGetU(x) for (usz i=0; i<ia; i++) rp[i]=o2cu(GetU(x,i)); } \
} \
decG(x); \
return (T##Arr*)r; \
}
HArr* cpyHArr(B x) {
usz ia = a(x)->ia;
HArr_p r = m_harrUc(x);
u8 xe = TI(x,elType);
if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(bitp_get(xp, i)); }
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_f64(xp[i]); }
else if (xe==el_c8 ) { u8* xp = c8any_ptr (x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else if (xe==el_c16) { u16* xp = c16any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else if (xe==el_c32) { u32* xp = c32any_ptr(x); for(usz i=0; i<ia; i++) r.a[i]=m_c32(xp[i]); }
else {
B* xp = arr_bptr(x);
if (xp!=NULL) { for (usz i=0; i<ia; i++) r.a[i] = inc(xp[i]); }
else { SGet(x) for (usz i=0; i<ia; i++) r.a[i] = Get(x, i); }
}
dec(x);
return r.c;
}
BitArr* cpyBitArr(B x) {
usz ia = a(x)->ia;
u64* rp; Arr* r = m_bitarrp(&rp, ia);
arr_shCopy(r, x);
u8 xe = TI(x,elType);
if (xe==el_bit) { u64* xp = bitarr_ptr(x); for(usz i=0; i<BIT_N(ia); i++) rp[i] = xp[i]; }
else if (xe==el_i8 ) { i8* xp = i8any_ptr (x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_i16) { i16* xp = i16any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_i32) { i32* xp = i32any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else if (xe==el_f64) { f64* xp = f64any_ptr(x); for(usz i=0; i<ia; i++) bitp_set(rp,i,xp[i]); }
else {
B* xp = arr_bptr(x);
if (xp!=NULL) { for (usz i=0; i<ia; i++) bitp_set(rp,i,o2fu(xp[i] )); }
else { SGetU(x) for (usz i=0; i<ia; i++) bitp_set(rp,i,o2fu(GetU(x,i))); }
}
decG(x);
return (BitArr*)r;
}
#endif
MAKE_ICPY(I8, i8)
MAKE_ICPY(I16, i16)
MAKE_ICPY(I32, i32)
MAKE_ICPY(F64, f64)
MAKE_CCPY(C8, c8)
MAKE_CCPY(C16, c16)
MAKE_CCPY(C32, c32)
#undef BIT_PUT
#undef MAKE_CCPY
#undef MAKE_ICPY
#undef MAKE_CPY
#undef COPY_FN
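The macro soup is easier to follow from a hand expansion; in a SINGELI build, MAKE_ICPY(I32, i32) produces roughly (whitespace added, names exactly as in the macros above):

```
static copy_fn copyI32Fns[10];

// Boxed-element fallback: when the B* buffer is directly addressable and B
// is f64-sized, reuse the f64→i32 kernel (slot el_MAX) on it; otherwise go
// through the generic per-element getter.
static void cpyI32Arr_B(u8* xp, u8* rp, u64 ia, u8* xRaw) {
  Arr* xa = (Arr*)xRaw; B* bxp = arrV_bptr(xa);
  if (bxp!=NULL && sizeof(B)==sizeof(f64)) {
    copyI32Fns[el_MAX]((u8*)bxp, rp, ia, xRaw);
  } else { AS2B fn = TIv(xa,getU);
    for (usz i=0; i<ia; i++) ((I32Atom*)rp)[i] = o2fu(fn(xa,i));
  }
}
static copy_fn copyI32Fns[] = {
  avx2_copy_1_i32, avx2_copy_i8_i32, avx2_copy_i16_i32, avx2_copy_i32_i32,
  avx2_copy_f64_i32, badCopy, badCopy, badCopy, cpyI32Arr_B, avx2_copy_f64_i32
};
I32Arr* cpyI32Arr(B x) {
  usz ia = a(x)->ia;
  I32Atom* rp; Arr* r = m_i32arrp(&rp, ia); arr_shCopy(r, x);
  if (ia>0) copyI32Fns[TI(x,elType)](tyany_ptr(x), (u8*)rp, ia, (u8*)a(x));
  decG(x);
  return (I32Arr*)r;
}
```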
static B m_getU_MAX(Mut* m, usz ms) { err("m_getU_MAX"); }
static B m_getU_bit(Mut* m, usz ms) { return m_i32(bitp_get(m->abit, ms)); }
static B m_getU_i8 (Mut* m, usz ms) { return m_i32(m->ai8 [ms]); }


@@ -8,6 +8,7 @@ test/x86Cfgs.sh path/to/mlochbaum/BQN // run the test suite for x86-64-specific
test/moreCfgs.sh path/to/mlochbaum/BQN // run "2+2" in a bunch of configurations
./BQN test/cmp.bqn // fuzz-test scalar comparison functions =≠<≤>≥
./BQN test/equal.bqn // fuzz-test 𝕨≡𝕩
./BQN test/copy.bqn // fuzz-test creating new arrays with elements copied from another array
./BQN test/bitcpy.bqn // fuzz-test bit_cpy; requires a CBQN build with -DTEST_BITCPY
./BQN test/squeeze.bqn // fuzz-test squeezing; requires a CBQN build with -DEEQUAL_NEGZERO
```

test/copy.bqn (new file, 49 lines)

@@ -0,0 +1,49 @@
⟨Variation, ListVariations, ClearRefs⟩ ← •internal
u ← 100×(•UnixTime+1|100וMonoTime)@
R ← (•MakeRand •Show u).Range
vn ← "Ab"‿"Ai8"‿"Ai16"‿"Ai32"‿"Af64"‿"Ac8"‿"Ac16"‿"Ac32"‿"Ah"‿"Af"
vi ← ⟨0, 0, 0, 0, 1, 2, 2, 2, 1, 1⟩
vf
(5 vn)¨ 5
(35vn)¨ 3
vf (8vn)¨ vf
vf (10) {𝕩 'S'¨ 1¨ (𝕨=0)𝕩}¨ vf
vf {𝕩 "Inc"¨ 𝕩}¨ vf
•Show¨ vf
count ← 10000
eqlen ← 0
{𝕊:
n R 26+R 9
v R vn
l
{𝕊: (v0261430) -˜ n R 2v171531}
{𝕊: ÷n R 0}
{𝕊: @+n R (v-5)256655361114112}
vvi
vs vvf
vsLV ← ListVariations l
{ 𝕊 v0:
a0 ← v0 Variation l
! a0 ≡ l
{ 𝕊:
a1 ← 𝕩 Variation a0
a0 ≡ a1 ? 1;
•Out "fail:"
•Show v0‿"→"‿𝕩
•Exit 1
}¨ vs
}¨ vs
ClearRefs@
eqlen+ (vs) +´vsvsLV
}¨ ↕count
•Out "Fraction of matching variation count to ListVariations: "∾(•Repr eqlen÷count)∾" (expected: ~0.999)"