NEON copy.singeli

dzaima committed 2023-02-17 21:50:18 +02:00
parent 0aada6163a
commit 1e6b612e13
3 changed files with 66 additions and 60 deletions

View File

@@ -554,7 +554,7 @@ cachedBin‿linkerCache ← {
   "src/utils/", "ryu.c", "utf.c", "hash.c", "file.c", "mut.c", "each.c", "bits.c"
   cbqnSrc cbqnSrc clangd.Files "src"
-  singeliMap {po.arch"aarch64"? 𝕩/˜(1¨𝕩)"cmp""bits""equal""dyarith""monarith""squeeze"; 𝕩}
+  singeliMap {po.arch"aarch64"? 𝕩/˜(1¨𝕩)"cmp""bits""equal""dyarith""monarith""squeeze""copy"; 𝕩}
   "src/builtins/arithm.c""monarith",
   "src/core/stuff.c""equal", "src/utils/mut.c""copy", "src/utils/bits.c""bits"
   "src/builtins/arithd.c""dyarith", "src/builtins/cmp.c""cmp", "src/builtins/squeeze.c""squeeze"

View File

@@ -1,7 +1,11 @@
 include './base'
-include './sse3'
-include './avx'
-include './avx2'
+if (hasarch{'X86_64'}) {
+  include './sse3'
+  include './avx'
+  include './avx2'
+} else if (hasarch{'AARCH64'}) {
+  include './neon'
+}
 include './mask'
 include './cbqnDefs'
 include './bitops'
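
copy.singeli previously hard-coded the x86-64 include set; `hasarch` now selects the backend at Singeli compile time, so the one source file serves both architectures. A rough C analogy of that compile-time branch (the header names here are invented purely for illustration, not CBQN's):

/* Hypothetical C equivalent of the `hasarch` gating above. */
#if defined(__x86_64__)
  #include "simd_sse3.h"   /* illustrative names only */
  #include "simd_avx.h"
  #include "simd_avx2.h"
#elif defined(__aarch64__)
  #include "simd_neon.h"
#endif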
@@ -18,7 +22,8 @@ def copyFromBits{T, xp: *u64, rp: *eltype{T}, l:u64} = {
 }
-copy{vw, X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
+copy{X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
+  def vw = arch_defvw
   assert{l!=0}
   def bulk = vw/max{width{X}, width{R}}
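
The vector width `vw` is no longer passed as an explicit 256 at each instantiation; it now comes from `arch_defvw`, presumably the target's default vector width (256 bits with AVX2, 128 bits with NEON). A self-contained sketch of the resulting `bulk` computation, with those widths taken as assumptions:

#include <stdio.h>

/* `bulk` = elements per vector batch, as computed in copy.singeli above;
   the 128/256 values below are assumptions about arch_defvw. */
static unsigned bulk(unsigned vw, unsigned wX, unsigned wR) {
  unsigned m = wX > wR ? wX : wR;   /* max{width{X}, width{R}} */
  return vw / m;
}

int main(void) {
  printf("NEON i8->i32: %u\n", bulk(128, 8, 32));  /* 4 elements per batch */
  printf("AVX2 i8->i32: %u\n", bulk(256, 8, 32));  /* 8 elements per batch */
  return 0;
}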
@@ -26,6 +31,7 @@ copy{vw, X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
   rp:= *tern{R==u1, u64, R} ~~ r
   def XV = [bulk]X
   def RV = [bulk]R
+  def ur = tern{hasarch{'AARCH64'}, 4, 1}
   if (R==u64) {
     # show{'R==u64', X, R}
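
`ur` is a new unroll factor: 4 on AArch64, 1 elsewhere. It feeds the `muLoop` calls below, which replace `maskedLoop` and take this extra parameter. A minimal C sketch of that loop shape, assuming `muLoop` is a masked, unrolled loop (all names and the exact callback shape are illustrative; the Singeli version passes indices `is` and a mask `M`):

#include <stddef.h>

/* Process `ur` full batches of `bulk` elements per main-loop iteration,
   then finish with batches that may be only partially valid (masked). */
static void mu_loop(size_t bulk, size_t ur, size_t len,
                    void (*body)(size_t i, size_t valid)) {
  size_t i = 0, step = bulk * ur;
  for (; i + step <= len; i += step)       /* unrolled main loop */
    for (size_t u = 0; u < ur; u++)
      body(i + u*bulk, bulk);              /* full batch, no masking needed */
  for (; i < len; i += bulk) {             /* tail */
    size_t valid = len - i < bulk ? len - i : bulk;
    body(i, valid);                        /* masked store for a partial batch */
  }
}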
@@ -55,67 +61,67 @@ copy{vw, X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
     }
   } else if (width{X}<=width{R}) {
     # show{'w{X}<=w{R}', X, R}
-    maskedLoop{bulk, l, {i, M} => {
-      v:= loadBatch{xp, i, RV}
-      storeBatch{rp, i, v, M}
+    muLoop{bulk, ur, l, {is, M} => {
+      def v = loadBatch{xp, is, RV}
+      storeBatch{rp, is, v, M}
     }}
   } else {
     # show{'w{X}>w{R}', X, R}
-    maskedLoop{bulk, l, {i, M} => {
-      v:= loadBatch{xp, i, XV}
-      storeBatch{rp, i, v, M}
+    muLoop{bulk, ur, l, {is, M} => {
+      def v = loadBatch{xp, is, XV}
+      storeBatch{rp, is, v, M}
     }}
   }
 }
-# avx2_copy_src_dst
+# simd_copy_src_dst
 # x→int & equal-width copies
-'avx2_copy_1_1' = copy{256, u1, u1}
-'avx2_copy_1_i8' = copy{256, u1, i8}
-'avx2_copy_1_i16' = copy{256, u1, i16}
-'avx2_copy_1_i32' = copy{256, u1, i32}
-'avx2_copy_i8_1' = copy{256, i8, u1}
-'avx2_copy_i8_i8', 'avx2_copy_c8_c8' = copy{256, i8, i8}
-'avx2_copy_i8_i16' = copy{256, i8, i16}
-'avx2_copy_i8_i32' = copy{256, i8, i32}
-'avx2_copy_i16_1' = copy{256, i16, u1}
-'avx2_copy_i16_i8' = copy{256, i16, i8}
-'avx2_copy_i16_i16', 'avx2_copy_c16_c16' = copy{256, i16, i16}
-'avx2_copy_i16_i32' = copy{256, i16, i32}
-'avx2_copy_i32_1' = copy{256, i32, u1}
-'avx2_copy_i32_i8' = copy{256, i32, i8}
-'avx2_copy_i32_i16' = copy{256, i32, i16}
-'avx2_copy_i32_i32', 'avx2_copy_c32_c32'= copy{256, i32, i32}
-'avx2_copy_f64_1' = copy{256, f64, u1}
-'avx2_copy_f64_i8' = copy{256, f64, i8}
-'avx2_copy_f64_i16' = copy{256, f64, i16}
-'avx2_copy_f64_i32' = copy{256, f64, i32}
+'simd_copy_1_1' = copy{u1, u1}
+'simd_copy_1_i8' = copy{u1, i8}
+'simd_copy_1_i16' = copy{u1, i16}
+'simd_copy_1_i32' = copy{u1, i32}
+'simd_copy_i8_1' = copy{i8, u1}
+'simd_copy_i8_i8', 'simd_copy_c8_c8' = copy{i8, i8}
+'simd_copy_i8_i16' = copy{i8, i16}
+'simd_copy_i8_i32' = copy{i8, i32}
+'simd_copy_i16_1' = copy{i16, u1}
+'simd_copy_i16_i8' = copy{i16, i8}
+'simd_copy_i16_i16', 'simd_copy_c16_c16' = copy{i16, i16}
+'simd_copy_i16_i32' = copy{i16, i32}
+'simd_copy_i32_1' = copy{i32, u1}
+'simd_copy_i32_i8' = copy{i32, i8}
+'simd_copy_i32_i16' = copy{i32, i16}
+'simd_copy_i32_i32', 'simd_copy_c32_c32'= copy{i32, i32}
+'simd_copy_f64_1' = copy{f64, u1}
+'simd_copy_f64_i8' = copy{f64, i8}
+'simd_copy_f64_i16' = copy{f64, i16}
+'simd_copy_f64_i32' = copy{f64, i32}
-# x→f64, x→B (no avx2_copy_B_B because that may possibly need refcounting)
-'avx2_copy_1_f64', 'avx2_copy_1_B' = copy{256, u1, f64}
-'avx2_copy_i8_f64', 'avx2_copy_i8_B' = copy{256, i8, f64}
-'avx2_copy_i16_f64', 'avx2_copy_i16_B' = copy{256, i16, f64}
-'avx2_copy_i32_f64', 'avx2_copy_i32_B' = copy{256, i32, f64}
-'avx2_copy_f64_f64', 'avx2_copy_f64_B' = copy{256, f64, f64}
+# x→f64, x→B (no simd_copy_B_B because that may possibly need refcounting)
+'simd_copy_1_f64', 'simd_copy_1_B' = copy{u1, f64}
+'simd_copy_i8_f64', 'simd_copy_i8_B' = copy{i8, f64}
+'simd_copy_i16_f64', 'simd_copy_i16_B' = copy{i16, f64}
+'simd_copy_i32_f64', 'simd_copy_i32_B' = copy{i32, f64}
+'simd_copy_f64_f64', 'simd_copy_f64_B' = copy{f64, f64}
 # chr→x
-'avx2_copy_c8_c16' = copy{256, u8, u16}
-'avx2_copy_c8_c32' = copy{256, u8, u32}
-'avx2_copy_c8_B' = copy{256, u8, u64}
-'avx2_copy_c16_c8' = copy{256, u16, u8}
-'avx2_copy_c16_c32' = copy{256, u16, u32}
-'avx2_copy_c16_B' = copy{256, u16, u64}
-'avx2_copy_c32_c8' = copy{256, u32, u8}
-'avx2_copy_c32_c16' = copy{256, u32, u16}
-'avx2_copy_c32_B' = copy{256, u32, u64}
+'simd_copy_c8_c16' = copy{u8, u16}
+'simd_copy_c8_c32' = copy{u8, u32}
+'simd_copy_c8_B' = copy{u8, u64}
+'simd_copy_c16_c8' = copy{u16, u8}
+'simd_copy_c16_c32' = copy{u16, u32}
+'simd_copy_c16_B' = copy{u16, u64}
+'simd_copy_c32_c8' = copy{u32, u8}
+'simd_copy_c32_c16' = copy{u32, u16}
+'simd_copy_c32_B' = copy{u32, u64}
 # B→chr
-'avx2_copy_B_c8' = copy{256, u64, u8}
-'avx2_copy_B_c16' = copy{256, u64, u16}
-'avx2_copy_B_c32' = copy{256, u64, u32}
+'simd_copy_B_c8' = copy{u64, u8}
+'simd_copy_B_c16' = copy{u64, u16}
+'simd_copy_B_c32' = copy{u64, u32}
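
The exported names drop both the `avx2_` prefix and the leading width argument: the width is implied by `arch_defvw`, so one arch-neutral `simd_copy_*` set is generated on either target. The C-side signature is unchanged and matches the `copy_fn` typedef in mut.c below; one instance, declared here for illustration:

#include <stdint.h>
typedef uint64_t u64;  /* stand-in for CBQN's u64, to keep this snippet standalone */

/* Argument order follows the Singeli definition: x, r, l, xRaw. */
typedef void (*copy_fn)(void* x, void* r, u64 l, void* xRaw);
extern void simd_copy_i8_i32(void* x, void* r, u64 l, void* xRaw);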

View File

@@ -133,7 +133,7 @@ DEF_G(void, fill, B , (void* a, usz ms, B x, usz l), ms, x, l) {
 }
-#if SINGELI_X86_64
+#if SINGELI
 #define DEF_COPY(T, BODY) DEF0(void, copy, T, u8 xe=TI(x,elType); u8 ne=el_or(xe,el_##T);, ne==el_##T, ne, (void* a, usz ms, B x, usz xs, usz l), ms, x, xs, l)
 #else
 #define DEF_COPY(T, BODY) DEF(void, copy, T, u8 xe=TI(x,elType); u8 ne=el_or(xe,el_##T);, ne==el_##T, ne, (void* a, usz ms, B x, usz xs, usz l), ms, x, xs, l) { u8 xt=TY(x); (void)xt; BODY }
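
The guard is relaxed from `SINGELI_X86_64` to `SINGELI`, so the Singeli-generated copy routines are used on any architecture with a Singeli backend, which now includes AArch64. A guess at how the flags relate (the real definitions live in CBQN's build setup, not in this file):

/* Hypothetical relationship between the build flags, for orientation only. */
#if SINGELI_X86_64 || SINGELI_AARCH64
  #define SINGELI 1
#endif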
@@ -217,7 +217,7 @@ DEF_G(void, copy, B, (void* a, usz ms, B x, usz xs, usz l), ms, x, xs, l) {
   }
 }
-#if SINGELI_X86_64
+#if SINGELI
 #define SINGELI_FILE copy
 #include "./includeSingeli.h"
 typedef void (*copy_fn)(void*, void*, u64, void*);
@@ -226,7 +226,7 @@ DEF_G(void, copy, B, (void* a, usz ms, B x, usz xs, usz l), ms, x, xs, l) {
   err("Copying wrong array type");
 }
-#define COPY_FN(X,R) avx2_copy_##X##_##R
+#define COPY_FN(X,R) simd_copy_##X##_##R
 #define MAKE_CPY(TY, MAKE, GET, WR, XRP, H2T, T, ...) \
   static copy_fn copy##T##Fns[10]; \
   NOINLINE void cpy##T##Arr_BF(void* xp, void* rp, u64 ia, Arr* xa) { \
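
`COPY_FN` token-pastes the element types into the new arch-neutral prefix, so every table built from it picks up the rename automatically. A quick expansion check:

#define COPY_FN(X,R) simd_copy_##X##_##R
/* COPY_FN(i8,i32) expands to simd_copy_i8_i32;
   COPY_FN(f64,1)  expands to simd_copy_f64_1 (as used in the Bit table below). */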
@@ -261,13 +261,13 @@ DEF_G(void, copy, B, (void* a, usz ms, B x, usz xs, usz l), ms, x, xs, l) {
   Bit, {COPY_FN(1,1),COPY_FN(i8,1),COPY_FN(i16,1),COPY_FN(i32,1),COPY_FN(f64,1),badCopy, badCopy, badCopy, cpyBitArr_B, COPY_FN(f64,1)})
-static copy_fn tcopy_i8Fns [] = {[t_bitarr]=avx2_copy_1_i8, [t_i8arr]=avx2_copy_i8_i8 ,[t_i8slice]=avx2_copy_i8_i8};
-static copy_fn tcopy_i16Fns[] = {[t_bitarr]=avx2_copy_1_i16, [t_i8arr]=avx2_copy_i8_i16,[t_i8slice]=avx2_copy_i8_i16, [t_i16arr]=avx2_copy_i16_i16,[t_i16slice]=avx2_copy_i16_i16};
-static copy_fn tcopy_i32Fns[] = {[t_bitarr]=avx2_copy_1_i32, [t_i8arr]=avx2_copy_i8_i32,[t_i8slice]=avx2_copy_i8_i32, [t_i16arr]=avx2_copy_i16_i32,[t_i16slice]=avx2_copy_i16_i32, [t_i32arr]=avx2_copy_i32_i32,[t_i32slice]=avx2_copy_i32_i32};
-static copy_fn tcopy_f64Fns[] = {[t_bitarr]=avx2_copy_1_f64, [t_i8arr]=avx2_copy_i8_f64,[t_i8slice]=avx2_copy_i8_f64, [t_i16arr]=avx2_copy_i16_f64,[t_i16slice]=avx2_copy_i16_f64, [t_i32arr]=avx2_copy_i32_f64,[t_i32slice]=avx2_copy_i32_f64, [t_f64arr]=avx2_copy_f64_f64,[t_f64slice]=avx2_copy_f64_f64};
-static copy_fn tcopy_c8Fns [] = {[t_c8arr]=avx2_copy_c8_c8 ,[t_c8slice]=avx2_copy_c8_c8};
-static copy_fn tcopy_c16Fns[] = {[t_c8arr]=avx2_copy_c8_c16,[t_c8slice]=avx2_copy_c8_c16, [t_c16arr]=avx2_copy_c16_c16,[t_c16slice]=avx2_copy_c16_c16};
-static copy_fn tcopy_c32Fns[] = {[t_c8arr]=avx2_copy_c8_c32,[t_c8slice]=avx2_copy_c8_c32, [t_c16arr]=avx2_copy_c16_c32,[t_c16slice]=avx2_copy_c16_c32, [t_c32arr]=avx2_copy_c32_c32,[t_c32slice]=avx2_copy_c32_c32};
+static copy_fn tcopy_i8Fns [] = {[t_bitarr]=simd_copy_1_i8, [t_i8arr]=simd_copy_i8_i8 ,[t_i8slice]=simd_copy_i8_i8};
+static copy_fn tcopy_i16Fns[] = {[t_bitarr]=simd_copy_1_i16, [t_i8arr]=simd_copy_i8_i16,[t_i8slice]=simd_copy_i8_i16, [t_i16arr]=simd_copy_i16_i16,[t_i16slice]=simd_copy_i16_i16};
+static copy_fn tcopy_i32Fns[] = {[t_bitarr]=simd_copy_1_i32, [t_i8arr]=simd_copy_i8_i32,[t_i8slice]=simd_copy_i8_i32, [t_i16arr]=simd_copy_i16_i32,[t_i16slice]=simd_copy_i16_i32, [t_i32arr]=simd_copy_i32_i32,[t_i32slice]=simd_copy_i32_i32};
+static copy_fn tcopy_f64Fns[] = {[t_bitarr]=simd_copy_1_f64, [t_i8arr]=simd_copy_i8_f64,[t_i8slice]=simd_copy_i8_f64, [t_i16arr]=simd_copy_i16_f64,[t_i16slice]=simd_copy_i16_f64, [t_i32arr]=simd_copy_i32_f64,[t_i32slice]=simd_copy_i32_f64, [t_f64arr]=simd_copy_f64_f64,[t_f64slice]=simd_copy_f64_f64};
+static copy_fn tcopy_c8Fns [] = {[t_c8arr]=simd_copy_c8_c8 ,[t_c8slice]=simd_copy_c8_c8};
+static copy_fn tcopy_c16Fns[] = {[t_c8arr]=simd_copy_c8_c16,[t_c8slice]=simd_copy_c8_c16, [t_c16arr]=simd_copy_c16_c16,[t_c16slice]=simd_copy_c16_c16};
+static copy_fn tcopy_c32Fns[] = {[t_c8arr]=simd_copy_c8_c32,[t_c8slice]=simd_copy_c8_c32, [t_c16arr]=simd_copy_c16_c32,[t_c16slice]=simd_copy_c16_c32, [t_c32arr]=simd_copy_c32_c32,[t_c32slice]=simd_copy_c32_c32};
 #define TCOPY_FN(T, N, NUM) static void m_copyG_##N(void* a, usz ms, B x, usz xs, usz l) { \
   if (l==0) return; \
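
The `tcopy_*Fns` tables use designated initializers indexed by CBQN's type tags, leaving NULL gaps for source types that have no generated copy. A standalone sketch of that dispatch shape (the tag values and the wrapper are made up; the table mirrors `tcopy_i32Fns` above):

#include <stdint.h>
typedef uint8_t u8; typedef uint64_t u64;
typedef void (*copy_fn)(void*, void*, u64, void*);

/* Stand-ins for CBQN's type tags; the real values come from its headers. */
enum { t_bitarr = 1, t_i8arr = 3, t_i32arr = 7, t_count = 32 };
extern void simd_copy_1_i32 (void*, void*, u64, void*);
extern void simd_copy_i8_i32(void*, void*, u64, void*);

static copy_fn tcopy_i32Fns[t_count] = {
  [t_bitarr] = simd_copy_1_i32,
  [t_i8arr]  = simd_copy_i8_i32,    /* unnamed slots stay NULL */
};

static void copy_to_i32(u8 xtype, void* xp, void* rp, u64 l, void* xa) {
  copy_fn f = tcopy_i32Fns[xtype];  /* NULL => no SIMD path for this type */
  if (f) f(xp, rp, l, xa);          /* else: generic element-wise fallback */
}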