uCBQN/src/singeli/src/copy.singeli
dzaima b768332c03 explicit 2x unroll for x86 copy.singeli
clang was unrolling ~8x anyway, but without ability to uninterleave the loads & stores
2025-06-01 04:50:21 +03:00

include './base'
include './mask'
include './cbqnDefs'
include './bitops'
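
# Store l 0/1 values of element type T to rp; loadFn{U, i} is expected to
# produce the i-th bulk-wide vector of expanded source bits, of which only
# the low bit of each lane is kept.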
def copyFromBits{V=[bulk]T, loadFn, rp, l:(u64)} = {
  def U = ty_u{V}
  @for_masked{bulk}(sr in tup{'g',*T~~rp} over i to l) {
    x:= loadFn{U, i}
    sr{V~~(x & U ~~ V**1)}
  }
}
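
# Copy l elements from x (element type X) to r (element type R), converting
# between the two element representations where they differ.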
fn copy{X, R}(r: *void, x: *void, l:u64, xRaw: *void) : void = {
  def vw = arch_defvw{}
  assert{l!=0}
  def bulk = vw/__max{width{X}, width{R}}
  xp:= *tern{X==u1, u64, X} ~~ x
  rp:= *tern{R==u1, u64, R} ~~ r
  def XV = [bulk]X
  def RV = [bulk]R
  def ur = tern{hasarch{'AARCH64'}, 4, 2} # unroll factor: 4 on AArch64, explicit 2x on x86
  if (X==R and R!=u1) { # same non-bit type: one masked store for short l on x86, else memcpy
    if (hasarch{'X86_64'} and l<=bulk) store_narrow{rp, 0, load_widen{xp, 0, RV}, mask_first{l}}
    else emit{void, 'memcpy', r, x, l*(width{X}/8)}
  } else if (R==u64) { # character data to tagged 64-bit values: widen and OR in the c32 tag
    # show{'R==u64', X, R}
    assert{X==u8 or X==u16 or X==u32}
    # TODO could maybe read 256 bits and use unpack to write >256
    @for_mu{bulk,ur}(sr in tup{'g',rp}, x in tup{RV,xp} over l) sr{eachx{|, x, RV**(cbqn_c32Tag{}<<48)}}
  } else if (X==u1 and R==u1) { # bit to bit: copy whole 64-bit words
    # show{'u1u1', X, R}
    def V64 = [vw/64]u64
    @for_mu{vcount{V64},ur}(sr in tup{'g',rp}, x in tup{V64,xp} over cdiv{l,64}) sr{x}
  } else if (X==u1) { # bits widened to 0/1 elements of type R
    # show{'X==u1', X, R}
    copyFromBits{[bulk]R, load_expand_bits{., xp, .}, r, l}
  } else if (R==u1) { # elements packed to bits: compare with 1, store the resulting bitmask
    # show{'R==u1', X, R}
    def XU = ty_u{XV}
    def op{x} = (XU~~x) == XU~~XV**1
    def unr = if (hasarch{'X86_64'} and width{X}==16) 2 else __max{8/bulk, 1}
    def bulk2 = bulk*unr
    xi:ux = 0
    @for_nz (i to cdiv{l,bulk2}) {
      store_bits{bulk2, rp, i, hom_to_int{each{{i} => op{load_widen{xp, xi+i, XV}}, iota{unr}}}}
      xi+= unr
    }
  } else if (width{X}<=width{R}) { # widening element copy
    # show{'w{X}<=w{R}', X, R}
    @for_mu{bulk,ur}(sr in tup{'g',rp}, x in tup{RV,xp} over l) sr{x}
  } else { # narrowing element copy
    # show{'w{X}>w{R}', X, R}
    @for_mu{bulk,ur}(sr in tup{'g',rp}, x in tup{XV,xp} over l) sr{x}
  }
}
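
# Widen l bits to elements of type R, where the source bits start at a bit
# offset (derived from x relative to xRaw's data) that need not be aligned.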
fn copy_ubit{R}(r: *void, x: *void, l:u64, xRaw: *void) : void = {
  assert{l!=0}
  x0:= (*u8~~xRaw) + cbqn_tyArrOffset{}
  xs:= u64~~((*u8~~x) - x0)
  # if ((xs&7)==0) {
  #   copy{u1, R}(*void~~(x0 + (xs>>3)), r, l, xRaw)
  # } else {
    def vw = arch_defvw{}
    def bulk = vw/width{R}
    def RV = [bulk]R
    rp:= *R~~r
    copyFromBits{RV, {T=[k]_, i} => expand_bits{T, loadu_bits_trunc{*u64~~x0, xs+i*k, k}}, r, l}
  # }
}
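
# Export, under the name prefix p, a copy function for every supported
# (source, result) element type pair; tm gives each type's class:
# 0 = number, 1 = character, 2 = any (B).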
def gen{p} = {
  def ts = tup{u1, i8, i16, i32, f64, u8, u16, u32, u64}
  def tn = tup{'1','i8','i16','i32','f64','c8','c16','c32','B'}
  def tm = tup{0, 0, 0, 0, 0, 1, 1, 1, 2}
  each{{tx0,nx,mx} => {
    each{{tr0,nr,mr} => {
      if ((mx==mr or mx==2 or mr==2) and (if (mx==2) mr==1 else 1)) {
        def tr = if (mx==0 and mr==2) f64 else if (tx0==tr0 and mx==1) ty_s{tx0} else tr0
        def tx = if (mr==0 and mx==2) f64 else if (tx0==tr0 and mx==1) ty_s{tx0} else tx0
        export{merge{p, nx, '_', nr}, copy{tx, tr}}
      }
    }, ts, tn, tm}
  }, ts, tn, tm}
}
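
# generated names are e.g. simd_copy_i8_i16, simd_copy_c32_B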
gen{'simd_copy_'}
# unaligned bitarr widening
export{'simd_copy_1u_i8', copy_ubit{i8}}
export{'simd_copy_1u_i16', copy_ubit{i16}}
export{'simd_copy_1u_i32', copy_ubit{i32}}
export{'simd_copy_1u_f64', copy_ubit{f64}}