From 5032c9a3ab6cdd774b83f4c926abd655b5fadca1 Mon Sep 17 00:00:00 2001
From: dzaima
Date: Thu, 15 Jun 2023 19:12:20 +0300
Subject: [PATCH] remove unnecessary 'over _ to'

---
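Note for readers (in the notes area below the '---', which 'git am' ignores):
in Singeli loop headers, 'over i to n' binds a loop counter, and 'over _ to n'
binds one only to discard it, so plain 'over n' says the same thing with less
noise. A minimal sketch of the equivalence, assuming the '@for' generator from
Singeli's util/for and hypothetical names 'dst' and 'n':

  # both lines write 0 to each of the n words at dst; the loop
  # counter is unused either way, so naming it (even as '_') adds nothing
  @for (x in dst over _ to n) x = 0   # verbose form removed by this patch
  @for (x in dst over n)      x = 0   # equivalent shorthand it switches to
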
 src/singeli/src/cmp.singeli     | 2 +-
 src/singeli/src/copy.singeli    | 8 ++++----
 src/singeli/src/dyarith.singeli | 6 +++---
 src/singeli/src/select.singeli  | 6 +++---
 src/singeli/src/squeeze.singeli | 8 ++++----
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/src/singeli/src/cmp.singeli b/src/singeli/src/cmp.singeli
index e497fd67..dcc7a56c 100644
--- a/src/singeli/src/cmp.singeli
+++ b/src/singeli/src/cmp.singeli
@@ -100,7 +100,7 @@ fn as2bit{VT, unr, op}(dst:*u64, wr:*void, x:u64, len:Size) : void = {
 }
 
 fn bitAA{bitop}(dst:*u64, wr:*void, xr:*void, len:Size) : void = {
-  @forNZ (dst, w in *u64~~wr, x in *u64~~xr over _ to cdiv{len,64}) dst = bitop{w,x}
+  @forNZ (dst, w in *u64~~wr, x in *u64~~xr over cdiv{len,64}) dst = bitop{w,x}
 }
 
 fn not(dst:*u64, x:*u64, len:Size) : void = { am:=cdiv{len,64}; emit{void, 'bit_negatePtr', dst, x, am} }
diff --git a/src/singeli/src/copy.singeli b/src/singeli/src/copy.singeli
index 7a13920a..52d3eb29 100644
--- a/src/singeli/src/copy.singeli
+++ b/src/singeli/src/copy.singeli
@@ -41,11 +41,11 @@ fn copy{X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
     # show{'R==u64', X, R}
     assert{((X==u8) | (X==u16)) | (X==u32)}
     # TODO could maybe read 256 bits and use unpack to write >256
-    @maskedLoop{bulk}(sr in tup{'g',rp}, x in tup{RV,xp} over _ to l) sr{x | RV**(cbqn_c32Tag{}<<48)}
+    @maskedLoop{bulk}(sr in tup{'g',rp}, x in tup{RV,xp} over l) sr{x | RV**(cbqn_c32Tag{}<<48)}
   } else if (X==u1 and R==u1) {
     # show{'u1u1', X, R}
     def V64 = [vw/64]u64
-    @maskedLoop{vcount{V64}}(sr in tup{'g',rp}, x in tup{V64,xp} over _ to cdiv{l,64}) sr{x}
+    @maskedLoop{vcount{V64}}(sr in tup{'g',rp}, x in tup{V64,xp} over cdiv{l,64}) sr{x}
   } else if (X==u1) {
     # show{'X==u1', X, R}
     copyFromBits{[bulk]R, {T, i} => loadBatchBit{T, xp, i}, r, l}
@@ -59,10 +59,10 @@ fn copy{X, R}(x: *void, r: *void, l:u64, xRaw: *void) : void = {
     }
   } else if (width{X}<=width{R}) {
     # show{'w{X}<=w{R}', X, R}
-    @muLoop{bulk,ur}(sr in tup{'g',rp}, x in tup{RV,xp} over _ to l) sr{x}
+    @muLoop{bulk,ur}(sr in tup{'g',rp}, x in tup{RV,xp} over l) sr{x}
   } else {
     # show{'w{X}>w{R}', X, R}
-    @muLoop{bulk,ur}(sr in tup{'g',rp}, x in tup{XV,xp} over _ to l) sr{x}
+    @muLoop{bulk,ur}(sr in tup{'g',rp}, x in tup{XV,xp} over l) sr{x}
   }
 }
 
diff --git a/src/singeli/src/dyarith.singeli b/src/singeli/src/dyarith.singeli
index 4c5c2363..9068f3c1 100644
--- a/src/singeli/src/dyarith.singeli
+++ b/src/singeli/src/dyarith.singeli
@@ -144,13 +144,13 @@ def arithAAimpl{vw, mode, F, W, X, R, w, x, r, len} = {
   if (R==u1) {
     def bulk = vw/64
     def VT = [bulk]u64
-    @maskedLoop{bulk}(r in tup{'g',*u64~~r}, cw in tup{VT,*u64~~w}, cx in tup{VT,*u64~~x} over _ to cdiv{len, 64}) r{F{cw,cx}}
+    @maskedLoop{bulk}(r in tup{'g',*u64~~r}, cw in tup{VT,*u64~~w}, cx in tup{VT,*u64~~x} over cdiv{len, 64}) r{F{cw,cx}}
   } else if (match{F,__mul} and W!=u1 and X==u1 and W==R) { # 0‿1‿1‿1‿1‿0‿1‿1×3‿1‿4‿1‿5‿9‿2‿6
     def bulk = vw / width{W}
     def TU = ty_u{R}
     def TV = [bulk]TU
 
-    @muLoop{bulk, 2}(sr in tup{'g',*TU~~r}, cw in tup{TV,*TU~~w}, cx in tup{'b',TV,x} over _ to len) {
+    @muLoop{bulk, 2}(sr in tup{'g',*TU~~r}, cw in tup{TV,*TU~~w}, cx in tup{'b',TV,x} over len) {
       sr{each{&, cw, cx}}
     }
   } else {
@@ -213,7 +213,7 @@ fn andBytes{vw}(r: *u8, x: *u8, maskU64:u64, len:u64) : void = {
   def T8 = [bulk]u8
   def T64 = [bulk/8]u64
   maskFull:= T8~~T64**maskU64
-  @maskedLoop{bulk}(sr in tup{'g',r}, cx in tup{T8,x} over _ to len) sr{cx & maskFull}
+  @maskedLoop{bulk}(sr in tup{'g',r}, cx in tup{T8,x} over len) sr{cx & maskFull}
 }
 
 export{'simd_andBytes', andBytes{arch_defvw}}
diff --git a/src/singeli/src/select.singeli b/src/singeli/src/select.singeli
index 8c3cd9e7..e7ec9b2d 100644
--- a/src/singeli/src/select.singeli
+++ b/src/singeli/src/select.singeli
@@ -121,7 +121,7 @@ fn select{rw, TI, TD}(w0:*void, x0:*void, r0:*void, wl:u64, xl:u64) : u1 = {
 
   def VD = [bulk]TDE
   def xlf = VI**cast_i{TIE, xl}
-  @maskedLoop{bulk}(cw0 in tup{VI,w}, sr in tup{'g',r}, M in 'm' over _ to wl) {
+  @maskedLoop{bulk}(cw0 in tup{VI,w}, sr in tup{'g',r}, M in 'm' over wl) {
     cw:= wrapChk{cw0, VI,xlf, M}
     got:= gather{VD**0, x, cw, M}
     if (TDE!=TD) got&= VD**((1<32 and xl<=16) {
     xb:= shuf{[4]u64, spreadBits{[32]u8, load{*u32~~x0}}, 4b1010}
-    @maskedLoop{32}(cw0 in w, sr in r, M in 'm' over _ to wl) {
+    @maskedLoop{32}(cw0 in w, sr in r, M in 'm' over wl) {
       cw:= wrapChk{cw0, VI,xlf, M}
       sr = homMask{sel{[16]i8, xb, cw}}
     }
@@ -154,7 +154,7 @@ fn avx2_select_bool128(w0:*void, x0:*void, r0:*void, wl:u64, xl:u64) : u1 = {
   x:= shuf{[4]u64, load{*VI ~~ x0}, 4b1010}
   low:= VI**7
   b := VI~~make{[32]u8, 1 << (iota{32} & 7)}
-  @maskedLoop{32}(cw0 in w, sr in r, M in 'm' over _ to wl) {
+  @maskedLoop{32}(cw0 in w, sr in r, M in 'm' over wl) {
     cw:= wrapChk{cw0, VI,xlf, M}
     byte:= sel{[16]i8, x, VI~~(([8]u32~~(cw&~low))>>3)}
     mask:= sel{[16]i8, b, cw & low}
diff --git a/src/singeli/src/squeeze.singeli b/src/singeli/src/squeeze.singeli
index 661bfc7f..cc072258 100644
--- a/src/singeli/src/squeeze.singeli
+++ b/src/singeli/src/squeeze.singeli
@@ -59,7 +59,7 @@ fn squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
   r1:= EV**0
   if (CHR) { # c8, c16, c32
     def hw = width{E}/2
-    @maskedLoop{bulk}(xv in tup{XV,xp}, M in 'm' over _ to len) {
+    @maskedLoop{bulk}(xv in tup{XV,xp}, M in 'm' over len) {
       c:= EV~~xv
       if (X!=u16) r1|= M{c} # for u64, just accept the garbage top 32 bits and deal with them at the end
       if (B) {
@@ -74,14 +74,14 @@ fn squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
     0
   } else { # i8, i16, i32, f64
     if (X==i8) { # i8
-      @maskedLoop{bulk}(v0 in tup{XV,xp}, M in 'm' over _ to len) {
+      @maskedLoop{bulk}(v0 in tup{XV,xp}, M in 'm' over len) {
        if (anynePositive{EV**0xfe & EV~~v0, EV**0, M}) return{2}
      }
      0
    } else { # i16, i32, f64
      def case_B = makeOptBranch{B, tup{Size}, {iCont} => {
        def XU = [bulk]u64
-        @maskedLoop{bulk, iCont}(xv in tup{XV,xp}, M in 'm' over _ to len) {
+        @maskedLoop{bulk, iCont}(xv in tup{XV,xp}, M in 'm' over len) {
          v:= XU ~~ xv
          if (anySNaN{M, v}) return{0xffff_fffe} # not even a number
        }
@@ -93,7 +93,7 @@ fn squeeze{vw, X, CHR, B}(x0:*void, len:Size) : u32 = {
      }
 
      if (isint{X}) { # i16, i32
-      @muLoop{bulk, 1}(v0 in tup{XV,xp}, M in 'm' over _ to len) {
+      @muLoop{bulk, 1}(v0 in tup{XV,xp}, M in 'm' over len) {
        r1|= M{tree_fold{|, each{{v} => getAcc{EV, v}, v0}}}
      }
    } else { # f64