aarch64 fold.singeli
parent b3f3190028
commit 897c46df76
In the build dependency table, fold.c's architecture flag string changes from "x.." to "xa.", which evidently enables the AArch64 Singeli build of fold alongside x86-64, matching the other SIMD builtins listed here.

@@ -599,7 +599,7 @@ cachedBin‿linkerCache ← {
 "xa."‿"src/builtins/arithd.c"‿"dyarith", "xa."‿"src/builtins/cmp.c"‿"cmp",
 "xa."‿"src/builtins/squeeze.c"‿"squeeze", "xa."‿"src/utils/mut.c"‿"copy",
 "xa."‿"src/utils/bits.c"‿"bits", "xag"‿"src/builtins/transpose.c"‿"transpose",
-"xa."‿"src/builtins/search.c"‿"search", "x.."‿"src/builtins/fold.c"‿"fold",
+"xa."‿"src/builtins/search.c"‿"search", "xa."‿"src/builtins/fold.c"‿"fold",

 "2.."‿"src/builtins/select.c"‿"select", "2.."‿"src/builtins/scan.c"‿"scan",
 "2.."‿"src/builtins/slash.c"‿"constrep", "2.."‿"src/builtins/scan.c"‿"neq",
The guard macro changes from SINGELI_X86_64 to the architecture-neutral SINGELI_SIMD, so the Singeli-generated fold kernels are compiled on any SIMD target, now including AArch64 (this hunk appears to be in src/builtins/fold.c, per the build table above).

@@ -15,7 +15,7 @@
 #include "../builtins.h"
 #include "../utils/mut.h"

-#if SINGELI_X86_64
+#if SINGELI_SIMD
 #define SINGELI_FILE fold
 #include "../utils/includeSingeli.h"
 #endif
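For context, a gate like SINGELI_SIMD is typically an umbrella over the per-backend flags. A minimal sketch of how it might be defined; the flag name SINGELI_AARCH64 and the exact definition are assumptions, CBQN's real header may differ:

    #ifndef SINGELI_SIMD
    /* Hypothetical umbrella flag: true when any Singeli SIMD backend is
       enabled, so call sites need not repeat per-architecture checks. */
    #if SINGELI_X86_64 || SINGELI_AARCH64
      #define SINGELI_SIMD 1
    #else
      #define SINGELI_SIMD 0
    #endif
    #endif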
@@ -92,7 +92,7 @@ B sum_c1(B t, B x) {
 }
 r += s;
 } else {
-#if SINGELI_X86_64
+#if SINGELI_SIMD
 r = simd_sum_f64(xv, ia);
 #else
 r=0; for (usz i=0; i<ia; i++) r+=((f64*)xv)[i];
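Note the scalar fallback sums strictly left to right, while a SIMD sum accumulates per lane and combines at the end, so the two can differ in the last bits of an f64 result. A scalar sketch of the lane-wise shape, illustrative only:

    #include <stddef.h>

    /* What a 2-lane vector sum effectively computes: two interleaved
       partial sums combined at the end. This reassociates f64 addition,
       so results may differ slightly from the serial loop above. */
    static double sum_2acc(const double* x, size_t n) {
      double a = 0, b = 0;
      size_t i = 0;
      for (; i + 2 <= n; i += 2) { a += x[i]; b += x[i+1]; }
      if (i < n) a += x[i];  /* odd tail element */
      return a + b;
    }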
@@ -133,7 +133,7 @@ static f64 (*const prod_fns[])(void*, usz, f64) = { prod_i8, prod_i16, prod_i32,
 static f64 min_##T(void* xv, usz ia) { MIN_MAX(T,<) } \
 static f64 max_##T(void* xv, usz ia) { MIN_MAX(T,>) }
 DEF_MIN_MAX(i8) DEF_MIN_MAX(i16) DEF_MIN_MAX(i32)
-#if SINGELI_X86_64
+#if SINGELI_SIMD
 static f64 min_f64(void* xv, usz ia) { return simd_fold_min_f64(xv,ia); }
 static f64 max_f64(void* xv, usz ia) { return simd_fold_max_f64(xv,ia); }
 #else
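The MIN_MAX macro body is not shown in this hunk; judging from how it is used above, a plausible expansion would be a scalar loop like the following. This is a hypothetical reconstruction, the real macro in fold.c may differ:

    /* Hypothetical sketch of the scalar MIN_MAX loop used by min_##T and
       max_##T: OP is < for min and > for max; xv and ia come from the
       enclosing function's parameters. */
    #define MIN_MAX(T, OP) \
      T* p = (T*)xv; T r = p[0]; \
      for (usz i = 1; i < ia; i++) if (p[i] OP r) r = p[i]; \
      return (f64)r;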
In the fallback definitions for targets without the needed SIMD ops, the three per-operation fold stubs are replaced by a single stub for vfold, since the fold call sites apparently now all go through vfold; fold_addw keeps its own stub as it is still used directly.

@@ -149,10 +149,8 @@ def unpackHi{...x} = assert{'unpackHi not supported', show{...x}}
 def unpackQ{...x} = assert{'unpackQ not supported', show{...x}}
 def packQ{...x} = assert{'packQ not supported', show{...x}}
 def __mulhi{...x} = assert{'__mulhi not supported', show{...x}}
-def fold_max {...x} = assert{'fold_max not supported', show{...x}}
-def fold_min {...x} = assert{'fold_min not supported', show{...x}}
-def fold_add {...x} = assert{'fold_add not supported', show{...x}}
 def fold_addw{...x} = assert{'fold_addw not supported', show{...x}}
+def vfold{...x} = assert{'vfold not supported', show{...x}}
 def narrowPair{...x} = assert{'narrowPair not supported', show{...x}}
 def pair{...x} = assert{'pair not supported', show{...x}}
 def cvt{...x} = assert{'cvt not supported', show{...x}}
fold_idem (min/max over f64) is restructured: the len>0 assertion moves to the top, the short-vector branch asserts its architecture assumptions instead of silently branching on them, and the long case keeps the pairwise reduction but finishes with the across-vector vfold on AArch64 rather than the shuffle-based mix.

@@ -34,21 +34,22 @@ fn fold_idem{T==f64, op}(x:*T, len:u64) : T = {
 def V = [bulk]T
 xv:= *V ~~ x
 r:V = [bulk]f64**0
+assert{len > 0}
 if (len<bulk) {
 # Can't overlap like the long case
-assert{len > 0}
 r = load{xv}
-if (bulk==4 and hasarch{'AVX'}) {
+if (bulk>2) {
+assert{(bulk==4) & hasarch{'AVX'}}
 if (len > 1) {
 if (len > 2) r = opsh64{op}{r, 4b2222}
 r = opsh64{op}{r, 4b1111}
 }
-} else {
-assert{bulk==2}
 }
 } else {
 i:= load{*V ~~ (x+len-bulk)}
-r = mix{op, reduce_pairwise{op, 2, xv, (len-1)/bulk, i}}
+r0:= reduce_pairwise{op, 2, xv, (len-1)/bulk, i}
+if (hasarch{'AARCH64'}) return{vfold{op, r0}}
+else r = mix{op, r0}
 }
 extract{r, 0}
 }
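A C rendering of the long-case strategy using NEON intrinsics; this is a sketch of the technique, not CBQN's generated code. The final vector is loaded first and may overlap the body, which is harmless for an idempotent op like max, and the across-lanes vmaxv instruction plays the role of vfold:

    #include <arm_neon.h>
    #include <stddef.h>

    /* Sketch of fold_idem's long case for max over f64 (assumes n >= 2,
       mirroring the assert{len > 0} plus short-case handling above). */
    static double max_f64_neon(const double* x, size_t n) {
      float64x2_t acc = vld1q_f64(x + n - 2);   /* tail vector, may overlap */
      for (size_t i = 0; i + 2 < n; i += 2)     /* body, full vectors      */
        acc = vmaxq_f64(acc, vld1q_f64(x + i));
      return vmaxvq_f64(acc);                   /* across-lanes fold       */
    }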
fold_assoc_0, the kernel behind simd_sum_f64, gets the same treatment: after the pairwise reduction, AArch64 folds across lanes with vfold while x86-64 keeps mix plus extract.

@@ -63,6 +64,7 @@ fn fold_assoc_0{T==f64, op}(x:*T, len:u64) : T = {
 e:= len / bulk
 i:= load{xv, e} & (V~~maskOf{V, len % bulk})
 r:= reduce_pairwise{op, 2, xv, e, i}
-extract{mix{op, r}, 0}
+if (hasarch{'AARCH64'}) vfold{op, r}
+else extract{mix{op, r}, 0}
 }
 export{'simd_sum_f64', fold_assoc_0{f64,+}}
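On AArch64, vfold{+, r} maps to the across-lanes add exposed by the vaddv intrinsic family (see the 'vaddv' emit below), so no shuffle sequence is needed. A minimal illustration in intrinsics:

    #include <arm_neon.h>

    /* vfold{+, r} for a 2-lane f64 vector is a single across-lanes add
       on AArch64, versus shuffle + add + extract on x86-64. */
    static double fold_add_f64(float64x2_t r) {
      return vaddvq_f64(r);
    }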
In the NEON definitions, bitwise ops on float vectors are added: NEON has no bitwise instructions for float types, so __or, __and, and __xor reinterpret the operands as same-width unsigned lanes and cast the result back.

@@ -29,6 +29,9 @@ def mul1{a:T,b:T & w64i{T}} = emit{ty_dbl{v_dbl{T}}, ntyp{'vmull', T}, a, b}
 def mul2{a:T,b:T & w128i{T}} = emit{ty_dbl{T}, ntyp0{'vmull_high', T}, a, b}
 def mul12{a:T,b:T & w128{T}} = tup{mul1{half{a,0}, half{b,0}}, mul2{a,b}}

+def __or{a:T,b:T & nvecf{T}} = T~~ __or{ty_u{a}, ty_u{b}}
+def __and{a:T,b:T & nvecf{T}} = T~~__and{ty_u{a}, ty_u{b}}
+def __xor{a:T,b:T & nvecf{T}} = T~~__xor{ty_u{a}, ty_u{b}}
 def __add{a:T,b:T & nvec {T}} = emit{T, ntyp{'vadd', T}, a, b}
 def __sub{a:T,b:T & nvec {T}} = emit{T, ntyp{'vsub', T}, a, b}
 def __mul{a:T,b:T & nvec {T}} = emit{T, ntyp{'vmul', T}, a, b}
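The same trick expressed in intrinsics, for comparison:

    #include <arm_neon.h>

    /* NEON's ORR only exists for integer vectors, so OR on f64 lanes
       goes through a u64 reinterpret and back, as __or does above. */
    static float64x2_t or_f64(float64x2_t a, float64x2_t b) {
      return vreinterpretq_f64_u64(
          vorrq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b)));
    }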
The across-vector min/max constraint is corrected: AArch64 lacks vminv/vmaxv only for 64-bit integer lanes, so the old elwidth{T}<=32 bound was too strict in that it also excluded f64, which the hardware does support. The new ~nveci{T,64} constraint admits f64 while still rejecting i64/u64.

@@ -69,10 +72,10 @@ def __ne{a:T,b:T & nvec{T}} = ~(a==b)

 def fold_add {a:T & nvec{T}} = emit{eltype{T}, ntyp{'vaddv', T}, a}
 def fold_addw{a:T & nveci{T}} = emit{ty_dbl{eltype{T}}, ntyp{'vaddlv', T}, a}
-def fold_min {a:T & nvec{T} & elwidth{T}<=32} = emit{eltype{T}, ntyp{'vminv', T}, a}
-def fold_max {a:T & nvec{T} & elwidth{T}<=32} = emit{eltype{T}, ntyp{'vmaxv', T}, a}
-def vfold{F, x:T & nvec{T} & match{F, min} & elwidth{T}<=32} = fold_min{x}
-def vfold{F, x:T & nvec{T} & match{F, max} & elwidth{T}<=32} = fold_max{x}
+def fold_min {a:T & nvec{T} & ~nveci{T,64}} = emit{eltype{T}, ntyp{'vminv', T}, a}
+def fold_max {a:T & nvec{T} & ~nveci{T,64}} = emit{eltype{T}, ntyp{'vmaxv', T}, a}
+def vfold{F, x:T & nvec{T} & ~nveci{T,64} & match{F, min}} = fold_min{x}
+def vfold{F, x:T & nvec{T} & ~nveci{T,64} & match{F, max}} = fold_max{x}
 def vfold{F, x:T & nvec{T} & match{F, +}} = fold_add{x}

 # TODO don't rely on regular stores being unaligned
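In intrinsics terms, the across-lanes min/max family covers 8/16/32-bit lanes and f64, but has no 64-bit-integer form:

    #include <arm_neon.h>

    /* Across-lanes max exists for these lane types... */
    static int32_t max_lanes_s32(int32x4_t v)   { return vmaxvq_s32(v); }
    /* ...including f64, which is newly reachable under ~nveci{T,64}: */
    static double  max_lanes_f64(float64x2_t v) { return vmaxvq_f64(v); }
    /* ...but there is no vmaxvq_s64/vmaxvq_u64, so 64-bit integer lanes
       need a different strategy; hence nveci{T,64} stays excluded. */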
squeeze's foldTotal now routes through the generic vfold dispatcher instead of calling fold_max directly, matching the constraint change above.

@@ -73,9 +73,9 @@ fn squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
 def foldTotal{TE, x:T & hasarch{'AARCH64'}} = {
 if (elwidth{T}==64) {
 if (width{TE}==64 and bulk==2) cast_i{TE, half{x,0} | half{x,1}}
-else fold_max{narrow{TE, x}}
+else vfold{max, narrow{TE, x}}
 } else {
-fold_max{x}
+vfold{max, x}
 }
 }

Finally, the generic scalar fallback vfold, which extracted each lane and folded with a runtime warning, is removed; targets without a real implementation now hit the explicit 'vfold not supported' assert added above, and the include-order comment about util/tup goes with it.

@@ -1,14 +1,6 @@
 # Fold associative/commutative operation across a register
 # Used by squeeze.singeli, count.singeli
-# Has to be included after util/tup because of name conflict

-def vfold{F, x:T} = {
-show{'WARNING: using fallback fold for ', F, T}
-def E = eltype{T}
-r:E = 0
-each{{i} => { r = F{r, extract{x, i}} }, iota{vcount{T}}}
-r
-}
 def vfold{F, x:T & w128{T} & hasarch{'X86_64'}} = {
 c:= x
 def EW = elwidth{T}
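For reference, a C analogue of the removed fallback (a sketch; the original is the deleted Singeli above). Note it seeds the accumulator with 0 and folds every lane, which is only safe when 0 is an identity for the operation, one more reason it was a warn-and-limp path rather than a real implementation:

    #include <stddef.h>

    /* C analogue of the removed generic vfold: pull each lane out and
       fold serially. Correct shape but slow, and the 0 seed is fine for
       + yet wrong for min, just like the Singeli it mirrors. */
    static double vfold_fallback(double (*f)(double, double),
                                 const double* lanes, size_t vcount) {
      double r = 0;
      for (size_t i = 0; i < vcount; i++) r = f(r, lanes[i]);
      return r;
    }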