diff --git a/build/src/build.bqn b/build/src/build.bqn
index 7ad34a16..6d915d0d 100755
--- a/build/src/build.bqn
+++ b/build/src/build.bqn
@@ -696,7 +696,7 @@ cachedBin‿linkerCache ← {
   "xa."‿"src/builtins/squeeze.c"‿"squeeze", "xa."‿"src/utils/mut.c"‿"copy", "xa."‿"src/utils/bits.c"‿"bits",
   "xag"‿"src/builtins/transpose.c"‿"transpose", "xag"‿"src/builtins/search.c"‿"search",
   "xag"‿"src/builtins/selfsearch.c"‿"selfsearch"
-  "xag"‿"src/builtins/scan.c"‿"scan", "xa."‿"src/builtins/fold.c"‿"fold",
+  "xag"‿"src/builtins/scan.c"‿"scan", "xag"‿"src/builtins/fold.c"‿"fold",
   "xag"‿"src/builtins/slash.c"‿"slash", "xag"‿"src/builtins/slash.c"‿"replicate",
   "xag"‿"src/builtins/sort.c"‿"bins",
   "xa."‿"src/builtins/slash.c"‿"count"
diff --git a/src/builtins/fold.c b/src/builtins/fold.c
index bce40bb7..262fcb14 100644
--- a/src/builtins/fold.c
+++ b/src/builtins/fold.c
@@ -15,7 +15,7 @@
 #include "../builtins.h"
 #include "../utils/mut.h"
 
-#if SINGELI_SIMD
+#if SINGELI
   #define SINGELI_FILE fold
   #include "../utils/includeSingeli.h"
 #endif
@@ -96,8 +96,8 @@ B sum_c1(B t, B x) {
       }
       r += s;
     } else {
-      #if SINGELI_SIMD
-      r = simd_sum_f64(xv, ia);
+      #if SINGELI
+      r = si_sum_f64(xv, ia);
       #else
       r=0; for (usz i=0; i<ia; i++) r+= xv[i];
       #endif
@@ ... @@
 DEF_MIN_MAX(i8) DEF_MIN_MAX(i16) DEF_MIN_MAX(i32)
-#if SINGELI_SIMD
-  static f64 min_f64(void* xv, usz ia) { return simd_fold_min_f64(xv,ia); }
-  static f64 max_f64(void* xv, usz ia) { return simd_fold_max_f64(xv,ia); }
+#if SINGELI
+  static f64 min_f64(void* xv, usz ia) { return si_fold_min_f64(xv,ia); }
+  static f64 max_f64(void* xv, usz ia) { return si_fold_max_f64(xv,ia); }
 #else
 DEF_MIN_MAX(f64)
 #endif
diff --git a/src/singeli/src/fold.singeli b/src/singeli/src/fold.singeli
index 1f262be5..9eec3045 100644
--- a/src/singeli/src/fold.singeli
+++ b/src/singeli/src/fold.singeli
@@ -20,7 +20,13 @@ def reduce_pairwise{op, plog, x:*T, len, init:T} = {
   r
 }
 
-fn fold_idem{T==f64, op}(x:*T, len:u64) : T = {
+fn fold_idem{T, op}(x:*T, len:u64) : T = {
+  assert{len > 0}
+  a := load{x, 0}
+  @for (x over _ from 1 to len) a = op{a, x}
+  a
+}
+fn fold_idem{T==f64, op if has_simd}(x:*T, len:u64) : T = {
   def bulk = arch_defvw/width{T}
   def V = [bulk]T
   xv:= *V ~~ x
@@ -45,10 +51,15 @@ fn fold_idem{T==f64, op}(x:*T, len:u64) : T = {
   extract{r, 0}
 }
 
-export{'simd_fold_min_f64', fold_idem{f64,min}}
-export{'simd_fold_max_f64', fold_idem{f64,max}}
+export{'si_fold_min_f64', fold_idem{f64,min}}
+export{'si_fold_max_f64', fold_idem{f64,max}}
 
-fn fold_assoc_0{T==f64, op}(x:*T, len:u64) : T = {
+fn fold_assoc_0{T, op}(x:*T, len:u64) : T = {
+  a:T = 0
+  @for (x over len) a = op{a, x}
+  a
+}
+fn fold_assoc_0{T==f64, op if has_simd}(x:*T, len:u64) : T = {
   def bulk = arch_defvw/width{T}
   def V = [bulk]T
   xv:= *V ~~ x
@@ -58,4 +69,4 @@ fn fold_assoc_0{T==f64, op}(x:*T, len:u64) : T = {
   if (hasarch{'AARCH64'}) vfold{op, r} else extract{mix{op, r}, 0}
 }
 
-export{'simd_sum_f64', fold_assoc_0{f64,+}}
+export{'si_sum_f64', fold_assoc_0{f64,+}}