From 3a3f213a92cf31bb79f1cf7e23ced41f6bda40a9 Mon Sep 17 00:00:00 2001
From: dzaima
Date: Wed, 7 Dec 2022 23:53:19 +0200
Subject: [PATCH] assign broadcast{T,v} to T**v

---
 src/singeli/src/avx.singeli      |  6 +++---
 src/singeli/src/avx2.singeli     |  4 ++--
 src/singeli/src/base.singeli     |  1 +
 src/singeli/src/bitops.singeli   |  4 ++--
 src/singeli/src/bits.singeli     |  4 ++--
 src/singeli/src/cmp.singeli      |  2 +-
 src/singeli/src/constrep.singeli | 10 +++++-----
 src/singeli/src/copy.singeli     |  6 +++---
 src/singeli/src/dyarith.singeli  | 22 +++++++++++-----------
 src/singeli/src/equal.singeli    |  8 ++++----
 src/singeli/src/fold.singeli     |  2 +-
 src/singeli/src/mask.singeli     |  4 ++--
 src/singeli/src/neq.singeli      |  4 ++--
 src/singeli/src/scan.singeli     | 12 ++++++------
 src/singeli/src/select.singeli   | 18 +++++++++---------
 src/singeli/src/squeeze.singeli  | 12 ++++++------
 src/singeli/src/sse3.singeli     |  6 +++---
 17 files changed, 63 insertions(+), 62 deletions(-)

diff --git a/src/singeli/src/avx.singeli b/src/singeli/src/avx.singeli
index 65dc15b4..0fc8300d 100644
--- a/src/singeli/src/avx.singeli
+++ b/src/singeli/src/avx.singeli
@@ -60,7 +60,7 @@ def iota{T & w256{T}} = make{T, ...iota{vcount{T}}}
 def __xor{a:T, b:T & w256{T}} = T ~~ emit{[8]f32, '_mm256_xor_ps', v2f{a}, v2f{b}}
 def __and{a:T, b:T & w256{T}} = T ~~ emit{[8]f32, '_mm256_and_ps', v2f{a}, v2f{b}}
 def __or {a:T, b:T & w256{T}} = T ~~ emit{[8]f32, '_mm256_or_ps', v2f{a}, v2f{b}}
-def __not{a:T & w256u{T}} = a ^ broadcast{T, ~cast{eltype{T},0}}
+def __not{a:T & w256u{T}} = a ^ (T ** ~cast{eltype{T},0})
 def andnot{a:T, b:T & w256{T}} = T ~~ emit{[8]f32, '_mm256_andnot_ps', v2f{b}, v2f{a}}
 
 # float comparison
@@ -81,7 +81,7 @@ def __div{a:T,b:T & T==[8]f32} = emit{T, '_mm256_div_ps', a, b}
 def max{a:T,b:T & T==[8]f32} = emit{T, '_mm256_max_ps', a, b}
 def min{a:T,b:T & T==[8]f32} = emit{T, '_mm256_min_ps', a, b}
 def sqrt{a:T,b:T & T==[8]f32} = emit{T, '_mm256_sqrt_ps', a, b}
-def abs{a:[8]f32} = emit{[8]f32, '_mm256_and_ps', a, v2f{broadcast{[8]u32, 0x7FFFFFFF}}}
+def abs{a:[8]f32} = emit{[8]f32, '_mm256_and_ps', a, v2f{[8]u32 ** 0x7FFFFFFF}}
 def floor{a:[8]f32} = emit{[8]f32, '_mm256_floor_ps', a}
 def ceil{a:[8]f32} = emit{[8]f32, '_mm256_ceil_ps', a}
 
@@ -93,7 +93,7 @@ def __div{a:T,b:T & T==[4]f64} = emit{T, '_mm256_div_pd', a, b}
 def max{a:T,b:T & T==[4]f64} = emit{T, '_mm256_max_pd', a, b}
 def min{a:T,b:T & T==[4]f64} = emit{T, '_mm256_min_pd', a, b}
 def sqrt{a:T,b:T & T==[4]f64} = emit{T, '_mm256_sqrt_pd', a, b}
-def abs{a:[4]f64} = emit{[4]f64, '_mm256_and_pd', a, v2d{broadcast{[4]u64, (cast{u64,1}<<63)-1}}}
+def abs{a:[4]f64} = emit{[4]f64, '_mm256_and_pd', a, v2d{[4]u64 ** ((cast{u64,1}<<63)-1)}}
 def floor{a:[4]f64} = emit{[4]f64, '_mm256_floor_pd', a}
 def ceil{a:[4]f64} = emit{[4]f64, '_mm256_ceil_pd', a}
 
diff --git a/src/singeli/src/avx2.singeli b/src/singeli/src/avx2.singeli
index 57d4214b..2de05b6e 100644
--- a/src/singeli/src/avx2.singeli
+++ b/src/singeli/src/avx2.singeli
@@ -135,12 +135,12 @@ def extract{x:T, i & w256i{T,16} & knum{i}} = emit{eltype{T}, '_mm256_extract_ep
 # mask stuff
 def getmask{x:T & w256{T, 8}} = emit{u32, '_mm256_movemask_epi8', x}
 def getmask{x:T & w256{T, 16}} = {
-  msk:u32 = getmask{emit{[32]u8, '_mm256_packs_epi16', x, broadcast{[16]u16, 0}}}
+  msk:u32 = getmask{emit{[32]u8, '_mm256_packs_epi16', x, [16]u16**0}}
   (msk&255) | (msk>>8)
 }
 def any{x:T & w256i{T, 16}} = getmask{[32]u8~~x}!=0
 def all{x:T & w256i{T, 16}} = getmask{[32]u8~~x} == (1<<32)-1
-def anyneg{x:T & w256s{T, 16}} = getmask{[32]u8 ~~ ([16]i16~~x < broadcast{[16]i16, 0})}!=0
+def anyneg{x:T & w256s{T, 16}} = getmask{[32]u8 ~~ ([16]i16~~x < [16]i16**0)}!=0
 
 
 # conversion
diff --git a/src/singeli/src/base.singeli b/src/singeli/src/base.singeli
index 75d75ac1..6b421d61 100644
--- a/src/singeli/src/base.singeli
+++ b/src/singeli/src/base.singeli
@@ -3,6 +3,7 @@ include 'arch/c'
 include 'util/kind'
 
 oper ~~ reinterpret infix right 55
+oper ** broadcast infix right 55
 
 def Size = u64
 def load{x} = *x
diff --git a/src/singeli/src/bitops.singeli b/src/singeli/src/bitops.singeli
index 99aeb4a3..cf370033 100644
--- a/src/singeli/src/bitops.singeli
+++ b/src/singeli/src/bitops.singeli
@@ -50,7 +50,7 @@ def b_setBatch{sz, x:*u64, n:(Size), v & sz==64} = store{ x, n, cast_i{u6
 
 def spreadBits{T==[32]u8, a:u32} = {
   def idxs = iota{32}
-  b:= broadcast{[8]u32, a}
+  b:= [8]u32**a
   c:= [32]u8~~b
   d:= sel{[16]u8, c, make{[32]i8, idxs>>3 + bit{4, idxs}}}
   e:= make{[32]u8, 1< { cb:= loadBatchBit{VT, bits, i}
diff --git a/src/singeli/src/cmp.singeli b/src/singeli/src/cmp.singeli
index f6af7a72..f9c81a1f 100644
--- a/src/singeli/src/cmp.singeli
+++ b/src/singeli/src/cmp.singeli
@@ -81,7 +81,7 @@ aa2bit{VT, unr, op}(dst:*u64, wr:*void, xr:*void, len:Size) : void = {
 
 as2bit{VT, unr, op}(dst:*u64, wr:*void, x:u64, len:Size) : void = {
   wv:= *VT~~wr; ws:= *eltype{VT}~~wr
-  xv:= broadcast{VT, pathAS{dst, len, eltype{VT}, op, x}}
+  xv:= VT**pathAS{dst, len, eltype{VT}, op, x}
   any2bit{VT, unr, op, {i}=>load{ws,i}, {i}=>load{wv,i}, {i}=>x, {i}=>xv, dst, len}
 }
 
diff --git a/src/singeli/src/constrep.singeli b/src/singeli/src/constrep.singeli
index 1203d131..1b68a430 100644
--- a/src/singeli/src/constrep.singeli
+++ b/src/singeli/src/constrep.singeli
@@ -45,10 +45,10 @@ def get_rep_iter{V==[4]u64, wv} = {
 def read_shuf_vecs{l, elbytes:u64, shp:*[32]i8} = {
   def double{x} = {
     s:=shuf{[4]u64, x, 4b3120}; s+=s
-    each{bind{~~,[32]i8},unpackQ{s, s+broadcast{type{s},1}}}
+    each{bind{~~,[32]i8},unpackQ{s, s + type{s}**1}}
   }
   def doubles{n,tup} = slice{join{each{double,tup}}, 0, n}
-  def sh = each{{v}=>{r:=v}, copy{l, broadcast{[32]i8, 0}}}
+  def sh = each{{v}=>{r:=v}, copy{l, [32]i8**0}}
   def tlen{e} = (l+(-l)%e)/e # Length for e bytes, rounded up
   def set{i} = { tupsel{i,sh} = each{bind{load,shp},i} }
   def ext{e} = {
@@ -69,7 +69,7 @@ def rep_const_shuffle{V, wv, onreps, xv:*V, rv:*V, n:u64} = {
   if (nv*step < n) {
     nr := n * wv
     e := nr / step
-    s := broadcast{V, 0}
+    s := V**0
     def end = makelabel{}
     onreps{load{xv,nv}, {v} => {
       s = v
@@ -112,7 +112,7 @@ rep_const_shuffle_partial4(wv:u64, elbytes:u64, x:*i8, r:*i8, n:u64) : void = {
     }
     re += (h-1)*step
     a := shuf{[4]u64, load{*V~~(x+i),0}, 4b1010}
-    s := broadcast{V, 0}
+    s := V**0
     def end = makelabel{}
     @unroll (j to h) {
       s = sel{[16]i8, a, tupsel{j,sh}}
@@ -140,7 +140,7 @@ def rep_const_broadcast{T, kv, loop, wv:u64, x:*T, r:*T, n:u64} = {
   assert{kv > 0}
   def V = [256/width{T}]T
   @for (x over n) {
-    v := broadcast{V, x}
+    v := V**x
     @loop (j to kv) store{*V~~r, j, v}
     r += wv
     store{*V~~r, -1, v}
diff --git a/src/singeli/src/copy.singeli b/src/singeli/src/copy.singeli
index 1c9669b1..3bdc7279 100644
--- a/src/singeli/src/copy.singeli
+++ b/src/singeli/src/copy.singeli
@@ -12,7 +12,7 @@ def copyFromBits{T, xp: *u64, rp: *eltype{T}, l:u64} = {
 
   maskedLoop{bulk, l, {i, M} => {
     x:= loadBatchBit{TU, xp, i}
-    r:= x & TU~~broadcast{T, 1}
+    r:= x & TU ~~ T**1
     storeBatch{rp, i, T~~r, M}
   }}
 }
@@ -32,7 +32,7 @@ copy{vw, X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
     assert{((X==u8) | (X==u16)) | (X==u32)}
     maskedLoop{bulk, l, {i, M} => { # TODO could maybe read 256 bits and use unpack to write >256
       v:= loadBatch{xp, i, RV}
-      v|= broadcast{RV, cbqn_c32Tag{} << 48}
+      v|= RV ** (cbqn_c32Tag{} << 48)
       storeBatch{rp, i, v, M}
     }}
   } else if (X==u1 and R==u1) {
@@ -50,7 +50,7 @@ copy{vw, X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
     def XU = ty_u{XV}
     @forNZ (i to cdiv{l,vcount{XV}}) {
      v:= loadBatch{xp, i, XV}
-      r:= getmask{(XU~~v) == XU~~broadcast{XV,1}}
+      r:= getmask{(XU~~v) == XU~~XV**1}
      b_setBatch{vcount{XV}, rp, i, r} # TODO something more special for f64
     }
   } else if (width{X}<=width{R}) {
diff --git a/src/singeli/src/dyarith.singeli b/src/singeli/src/dyarith.singeli
index f118bc2f..84b05a0a 100644
--- a/src/singeli/src/dyarith.singeli
+++ b/src/singeli/src/dyarith.singeli
@@ -36,8 +36,8 @@ def arithChk2{F, M, w:T, x:T & is_s{T} & (match{F,__add} | match{F,__sub})} = {
 
 # ×
 def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i8==eltype{T}} = {
-  def wp = unpackQ{w, T ~~ (broadcast{T,0}>w)}
-  def xp = unpackQ{x, T ~~ (broadcast{T,0}>x)}
+  def wp = unpackQ{w, T ~~ (T**0 > w)}
+  def xp = unpackQ{x, T ~~ (T**0 > x)}
   def rp = each{__mul, wp, xp}
   def bad = each{{v} => [16]i16 ~~ ((v<<8)>>8 != v), rp}
   if (M{0}) { # masked check
@@ -52,20 +52,20 @@ def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i16==eltype{T}} = {
   tup{rl, anyne{rh, rl>>15, M}}
 }
 def arithChk2{F, M, w:T, x:T & match{F,__mul} & isvec{T} & i32==eltype{T}} = {
-  max:= [8]f32 ~~ broadcast{[8]u32, 0x4efffffe}
+  max:= [8]f32 ~~ [8]u32**0x4efffffe
   def cf32{x} = emit{[8]f32, '_mm256_cvtepi32_ps', x}
   f32mul:= cf32{w} * cf32{x}
   tup{w*x, any{M{abs{f32mul} >= max}}} # TODO fallback to the below if the above fails
 
   # TODO don't do this, but instead shuffle one half, do math, unshuffle that half
-  # def wp = unpackQ{w, broadcast{T, 0}}
-  # def xp = unpackQ{x, broadcast{T, 0}}
+  # def wp = unpackQ{w, T**0}
+  # def xp = unpackQ{x, T**0}
   # def rp = each{__mul32, wp, xp}
   # def T2 = to_el{i64, T}
   # def bad = each{{v} => {
-  #   (((T2~~v) + broadcast{T2,0x80000000}) ^ broadcast{T2, cast{i64,1}<<63}) > broadcast{T2, cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}}
+  #   ((T2~~v + T2**0x80000000) ^ T2**(cast{i64,1}<<63)) > T2**cast_i{i64, (cast{u64,1}<<63) | 0xFFFFFFFF}
   # }, rp}
-  # tup{packQQ{each{{v} => v&broadcast{T2, 0xFFFFFFFF}, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}} this doesn't use M
+  # tup{packQQ{each{{v} => v & T2**0xFFFFFFFF, rp}}, any{tupsel{0,bad}|tupsel{1,bad}}} this doesn't use M
 }
 
 
@@ -83,7 +83,7 @@ def runner{u, R, F} = {
 
   def run{F, OO, M, w, x & u} = F{w, x} # trivial base implementation
 
-  def toggleTop{x:X} = x ^ broadcast{X, 1<<(width{eltype{X}}-1)}
+  def toggleTop{x:X} = x ^ X**(1<<(width{eltype{X}}-1))
   def run{F==__sub, OO, M, w:VU, x:VU & is_u{VU}} = { # 'b'-'a'
     def VS = ty_s{VU}
     run{F, OO, M, VS~~toggleTop{w}, VS~~toggleTop{x}}
@@ -95,7 +95,7 @@ def runner{u, R, F} = {
 
   def run{F, OO, M, w:VW, x:VX & c & R==u32 & (match{F,__add} | match{F,__sub})} = { # 'a'+1, 'a'-1
     r:= F{ty_u{w}, ty_u{x}}
-    if (any{M{r > broadcast{type{r}, 1114111}}}) OO{}
+    if (any{M{r > type{r}**1114111}}) OO{}
     to_el{R, VW}~~r
   }
   run
@@ -163,7 +163,7 @@ arithSAf{vw, mode, F, swap, W, X, R}(r:*void, w:u64, x:*void, len:u64) : u64 = {
   def run = runner{(R==f64) | (mode==2), R, F}
   def getW{v} = trunc{W, v}
   def getW{v & W==f64} = interp_f64{v}
-  cw:= broadcast{ty_sc{W, TY}, getW{w}}
+  cw:= ty_sc{W, TY}**getW{w}
 
   maskedLoop{bulk, len, {i, M} => {
     cx:= loadBatch{*X~~x, i, ty_sc{X, TY}}
@@ -181,7 +181,7 @@ andBytes{vw}(r: *u8, x: *u8, maskU64:u64, len:u64) : void = {
   def bulk = vw / width{u8}
   def T8 = [bulk]u8
   def T64 = [bulk/8]u64
-  maskFull:= T8~~broadcast{T64, maskU64}
+  maskFull:= T8~~T64**maskU64
   maskedLoop{bulk, len, {i, M} => {
     storeBatch{r, i, loadBatch{x, i, T8} & maskFull, M}
   }}
diff --git a/src/singeli/src/equal.singeli b/src/singeli/src/equal.singeli
index eb922b28..2ce49cda 100644
--- a/src/singeli/src/equal.singeli
+++ b/src/singeli/src/equal.singeli
@@ -26,12 +26,12 @@ equal{W, X}(w:*void, x:*void, l:u64, d:u64) : u1 = {
   } else if (X==f64) { # bitarr ≡ f64arr
     def T = [4]f64
     def bulk = 4
-    f0:= broadcast{T, 0.0}
-    f1:= broadcast{T, 1.0}
+    f0:= T**0.0
+    f1:= T**1.0
     maskedLoopPositive{bulk, l, {i, M} => {
       cw:= load{*u8 ~~ w, i>>1} >> cast_i{u8, 4*(i&1)}
       cx:= load{*T ~~ x, i}
-      wu:= blend{f0, f1, broadcast{[4]u64, cw} << make{[4]u64,63,62,61,60}}
+      wu:= blend{f0, f1, [4]u64**cw << make{[4]u64,63,62,61,60}}
       if (anynePositive{wu, cx, M}) return{0}
     }}
   } else { # bitarr ≡ i8/i16/i32arr
@@ -40,7 +40,7 @@ equal{W, X}(w:*void, x:*void, l:u64, d:u64) : u1 = {
     def sh{c & X==u8} = T ~~ (to_el{u16,c}<<7)
 
     # TODO compare with doing the comparison in vector registers
-    badBits:= broadcast{T, ~cast{X,1}}
+    badBits:= T ** ~cast{X,1}
     maskedLoop{bulk, l, {i, M} => {
       cw:= load{*ty_u{bulk} ~~ w, i}
      cx:= load{*T ~~ x, i}
diff --git a/src/singeli/src/fold.singeli b/src/singeli/src/fold.singeli
index d72ccda7..e4b03341 100644
--- a/src/singeli/src/fold.singeli
+++ b/src/singeli/src/fold.singeli
@@ -25,7 +25,7 @@ fold_idem{T==f64, op}(x:*T, len:u64) : T = {
   def step = 256/width{T}
   def V = [step]T
   xv:= *V ~~ x
-  r:V = broadcast{[4]f64, 0}
+  r:V = [4]f64**0
 
   if (len < step) { # Can't overlap like the long case
     assert{len > 0}
diff --git a/src/singeli/src/mask.singeli b/src/singeli/src/mask.singeli
index 57f5e698..005e371e 100644
--- a/src/singeli/src/mask.singeli
+++ b/src/singeli/src/mask.singeli
@@ -1,12 +1,12 @@
 local def maskInit1{w} = {
   apply{merge, each{{x} => {
-    merge{broadcast{w/8-1, 255}, (1>3)^31 + 64*(n&7))}
 mask128_1:*u8 = maskInit1{128}; def maskOfBit{T,n & w128{T}} = load{*[16]u8 ~~ (mask128_1 + (n>>3)^15 + 32*(n&7))}
-mask256:*i64 = merge{broadcast{4, -1}, broadcast{4, 0}}
+mask256:*i64 = merge{4 ** -1, 4 ** 0}
 
 local def maskOfImpl{T, n, w} = load{*ty_u{T} ~~ (*u8~~mask256 + 32 - n*(width{eltype{T}}/8))}
 # get mask of first n items; 0 ≤ n ≤ vcount{T}
diff --git a/src/singeli/src/neq.singeli b/src/singeli/src/neq.singeli
index f258032a..3a68c4d8 100644
--- a/src/singeli/src/neq.singeli
+++ b/src/singeli/src/neq.singeli
@@ -9,7 +9,7 @@ def storel{a:T, n, v & w128i{eltype{T} }} = emit{void, '_mm_storeu_si64',
 
 clmul_scan_ne_any(x:*void, r:*void, init:u64, words:u64, mark:u64) : void = {
   def V = [2]u64
-  m := broadcast{V, mark}
+  m := V**mark
   def xor64{a, i, carry} = { # carry is 64-bit broadcasted current total
     p := clmul{a, m, i}
     t := shr{[16]u8, p, 8}
@@ -20,7 +20,7 @@ clmul_scan_ne_any(x:*void, r:*void, init:u64, words:u64, mark:u64) : void = {
   xv := *V ~~ x
   rv := *V ~~ r
   e := words/2;
-  c := broadcast{V, init}
+  c := V**init
   @for (rv, xv over e) {
     rv = apply{unpacklo, (@collect (j to 2) xor64{xv, j, c})}
   }
diff --git a/src/singeli/src/scan.singeli b/src/singeli/src/scan.singeli
index 0d4fb034..051efa70 100644
--- a/src/singeli/src/scan.singeli
+++ b/src/singeli/src/scan.singeli
@@ -19,7 +19,7 @@ def spread{a:VT} = {
 def scan_loop{T, init, x:*T, r:*T, len:u64, scan, scan_last} = {
   def step = 256/width{T}
   def V = [step]T
-  p:= broadcast{V, init}
+  p:= V**init
   xv:= *V ~~ x
   rv:= *V ~~ r
   e:= len/step
@@ -31,7 +31,7 @@ def scan_post{T, init, x:*T, r:*T, len:u64, op, pre} = {
   def last{v, p} = op{pre{v}, p}
   def scan{v, p} = {
     n:= last{v, p}
-    p = (if (width{T}<=32) sel{[8]i32, spread{n}, broadcast{[8]i32, 7}};
+    p = (if (width{T}<=32) sel{[8]i32, spread{n}, [8]i32**7};
          else shuf{[4]u64, n, 4b3333})
     n
   }
@@ -100,14 +100,14 @@ avx2_bcs{T}(x:*u64, r:*T, l:u64) : void = {
   def V = [vl]U
   rv:= *V~~r
   xv:= *u32~~x
-  c:= broadcast{V, 0}
+  c:= V**0
 
   def ii32 = iota{32}; def bit{k}=bit{k,ii32}; def tail{k}=tail{k,ii32}
   def sums{n} = (if (n==0) tup{0}; else { def s=sums{n-1}; merge{s,s+1} })
 
-  def widen{v:T} = unpackQ{shuf{[4]u64, v, 4b3120}, broadcast{T, 0}}
+  def widen{v:T} = unpackQ{shuf{[4]u64, v, 4b3120}, T**0}
   def sumlanes{x:u32} = {
-    b:= broadcast{[8]u32, x} >> make{[8]u32, 4*tail{1, iota{8}}}
+    b:= [8]u32**x >> make{[8]u32, 4*tail{1, iota{8}}}
     s:= sel8{[32]u8~~b, ii32>>3 + bit{2}}
     p:= s & make{[32]u8, (1<<(1+tail{2})) - 1} # Prefixes
     d:= sel{[16]u8, make{[32]u8, merge{sums{4},sums{4}}}, [32]i8~~p}
@@ -123,7 +123,7 @@ avx2_bcs{T}(x:*u64, r:*T, l:u64) : void = {
       v := V~~v0 + c
       # Update carry at the lane boundary
       if (w!=32 or tail{1,k}) {
-        c = sel{[8]u32, spread{v}, make{[8]i32, broadcast{8, 7}}}
+        c = sel{[8]u32, spread{v}, make{[8]i32, 8**7}}
       }
      store1{rv, j+k, v}
     }
diff --git a/src/singeli/src/select.singeli b/src/singeli/src/select.singeli
index 5a5f33f7..3b8eac6b 100644
--- a/src/singeli/src/select.singeli
+++ b/src/singeli/src/select.singeli
@@ -22,7 +22,7 @@ def gather{def:T, b:B, idx:[4]i32, M & w256{T,64}} = {
 }
 
 def wrapChk{cw0, VI,xlf, M} = {
   cw:= cw0 + (xlf & VI~~(cw0= ty_u{xlf}}}) return{0}
   cw
 }
@@ -37,7 +37,7 @@ def storeExp{dst, ind, val, M, ext, rd, wl} = {
 def shuf_select{ri, rd, TI, w, r, wl, xl, selx} = {
   def VI = [ri]TI
   def ext = ri/rd
-  xlf:= broadcast{VI, cast_i{TI, xl}}
+  xlf:= VI**cast_i{TI, xl}
   maskedLoop{ri, wl, {i, M} => {
     cw:= wrapChk{loadBatch{w, i, VI}, VI,xlf, M}
     is:= (if (ext>1) i< se{e*2, VI~~c, o},
-      unpackQ{c2, c2+broadcast{VI,1}},
+      unpackQ{c2, c2 + VI**1},
       2*o + iota{2}
     }
   }
@@ -57,7 +57,7 @@ def shuf_select{ri, rd, TI, w, r, wl, xl, selx} = {
 def perm_select{ri, rd, TI, w, r, wl, xl, selx} = {
   def VI = [ri]TI
   def ext = ri/rd
-  xlf:= broadcast{VI, cast_i{TI, xl}}
+  xlf:= VI**cast_i{TI, xl}
   maskedLoop{ri, wl, {i, M} => {
     cw:= wrapChk{loadBatch{w, i, VI}, VI,xlf, M}
     is:= (if (ext>1) i< {
     cw:= wrapChk{loadBatch{w, i, VI}, VI,xlf, M}
-    got:= gather{broadcast{VD,0}, x, cw, M}
-    if (TDE!=TD) got&= broadcast{VD, (1<32 and xl<=16) {
     xb:= shuf{[4]u64, spreadBits{[32]u8, load{*u32~~x0}}, 4b1010}
@@ -155,7 +155,7 @@ avx2_select_bool128(w0:*void, x0:*void, r0:*void, wl:u64, xl:u64) : u1 = {
     }}
   } else {
     x:= shuf{[4]u64, load{*VI ~~ x0}, 4b1010}
-    low:= broadcast{VI, 7}
+    low:= VI**7
     b := VI~~make{[32]u8, 1 << (iota{32} & 7)}
     maskedLoop{32, wl, {i, M} => {
       cw:= wrapChk{load{w, i}, VI,xlf, M}
diff --git a/src/singeli/src/squeeze.singeli b/src/singeli/src/squeeze.singeli
index 26c9bf81..cbb8b465 100644
--- a/src/singeli/src/squeeze.singeli
+++ b/src/singeli/src/squeeze.singeli
@@ -10,12 +10,12 @@ def preserve_negative_zero = 0
 
 def inRangeLen{x:TS, start, count & issigned{eltype{TS}}} = {
   def TU = ty_u{TS}
-  TU~~(x-broadcast{TS,start}) < broadcast{TU,count}
+  (TU~~(x-TS**start)) < TU**count
 }
 def inRangeLen{x:TU, start, count & isunsigned{eltype{TU}}} = {
   def TS = ty_s{TU}
   def h = 1 << (width{eltype{TU}}-1)
-  TS~~(x-broadcast{TU,start-h}) < broadcast{TS,count-h}
+  (TS~~(x-TU**(start-h))) < TS**(count-h)
 }
 def inRangeIncl{x:T, start, end} = inRangeLen{x, start, end-start+1}
 def inRangeExcl{x:T, start, end} = inRangeLen{x, start, end-start}
@@ -60,7 +60,7 @@ squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
   def EV = [bulk]E
   # show{XV, EV, CHR, B}
   xp:= *X~~x0
-  r1:= broadcast{EV, 0}
+  r1:= EV**0
   if (CHR) { # c8, c16, c32
     def hw = width{E}/2
     maskedLoop{bulk, len, {i, M} => {
@@ -69,7 +69,7 @@ squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
       if (B) {
         if (any{M{~q_chr{c}}}) return{3}
       } else {
-        if (anynePositive{broadcast{EV, (1< {
      v0:= loadBatch{xp, i, XV}
-      if (anynePositive{broadcast{EV, 0xfe} & EV~~v0, broadcast{EV, 0}, M}) return{2}
+      if (anynePositive{EV**0xfe & EV~~v0, EV**0, M}) return{2}
     }}
     0
   } else { # i16, i32, f64
@@ -109,7 +109,7 @@ squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
       int
     }
     v1:= toint{v0}
-    r1|= M{(broadcast{EV, ~E~~1} & EV~~v1) ^ EV~~(v1 >> (width{X}-1))}
+    r1|= M{((EV ** ~E~~1) & EV~~v1) ^ EV~~(v1 >> (width{X}-1))}
   }}
 
   promote{u32, fold{|, r1}}
diff --git a/src/singeli/src/sse3.singeli b/src/singeli/src/sse3.singeli
index 541fb196..f7e89296 100644
--- a/src/singeli/src/sse3.singeli
+++ b/src/singeli/src/sse3.singeli
@@ -58,7 +58,7 @@ def iota{T & w128{T}} = make{T, ...iota{vcount{T}}}
 def __xor{a:T, b:T & w128{T}} = T ~~ emit{[4]f32, '_mm_xor_ps', v2f{a}, v2f{b}}
 def __and{a:T, b:T & w128{T}} = T ~~ emit{[4]f32, '_mm_and_ps', v2f{a}, v2f{b}}
 def __or {a:T, b:T & w128{T}} = T ~~ emit{[4]f32, '_mm_or_ps', v2f{a}, v2f{b}}
-def __not{a:T & w128u{T}} = a ^ broadcast{T, ~cast{eltype{T},0}}
+def __not{a:T & w128u{T}} = a ^ (T ** ~cast{eltype{T},0})
 
 # signed comparison
 def __eq{a:T,b:T & T==[16]i8 } = emit{[16]u8, '_mm_cmpeq_epi8', a, b}
@@ -118,7 +118,7 @@ def __sub{a:T,b:T & w128i{T, 64}} = emit{T, '_mm_sub_epi64', a, b}
 
 # mask stuff
 def getmask{x:T & w128{T, 8}} = emit{u16, '_mm_movemask_epi8', x}
-def getmask{x:T & w128{T, 16}} = getmask{emit{[16]u8, '_mm_packs_epi16', x, broadcast{[8]u16, 0}}}
+def getmask{x:T & w128{T, 16}} = getmask{emit{[16]u8, '_mm_packs_epi16', x, [8]u16**0}}
 def getmask{x:T & w128{T, 32}} = emit{u8, '_mm_movemask_ps', v2f{x}}
 def getmask{x:T & w128{T, 64}} = emit{u8, '_mm_movemask_pd', v2d{x}}
 def any{x:T & w128i{T}} = getmask{x} != 0 # assumes elements of x all have equal bits (avx2 utilizes this for 16 bits)
@@ -126,7 +126,7 @@ def all{x:T & w128i{T}} = getmask{x} == (1<
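
Note (illustrative, not part of the diff): the only new definition in this patch is the operator declaration added to base.singeli; every other change rewrites call sites to use it. With that declaration in scope, "T ** v" is infix syntax for broadcast{T, v}, filling every element of the vector type T with the scalar v, e.g.

  oper ** broadcast infix right 55    # the line added in base.singeli
  z:= [8]i32 ** 0                     # same value as broadcast{[8]i32, 0}
  m:= [16]u16 ** 0                    # same value as broadcast{[16]u16, 0}

The precedence and associativity (infix right 55) appear to be chosen to match the existing ~~ reinterpret operator declared on the preceding line of base.singeli, which is why rewrites like "TU ~~ T**1" in copy.singeli need no extra parentheses.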