X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=simple_v_extension%2Fdaxpy_example.mdwn;h=2958be48669af07b0360f1554ec68f502ad51559;hb=2ad33eb27a0717e99c3502794e16f947a758532c;hp=b6bc63139ba627559da40f627fd6e7d370bb2f85;hpb=1e62cf38107051ffcf04955a1ea3a629927d254b;p=libreriscv.git

diff --git a/simple_v_extension/daxpy_example.mdwn b/simple_v_extension/daxpy_example.mdwn
index b6bc63139..2958be486 100644
--- a/simple_v_extension/daxpy_example.mdwn
+++ b/simple_v_extension/daxpy_example.mdwn
@@ -1,22 +1,95 @@
- # c code
+# c code
+
+```
 void daxpy(size_t n, double a, const double x[], double y[]) {
     for (size_t i = 0; i < n; i++) {
         y[i] = a*x[i] + y[i];
     }
 }
+```
+
+# SVP64 Power ISA version
+
+Summary: 9 instructions, 5 of which are 64-bit, for a total of 14 "words"
+(the refinement below brings this down to 8 instructions).
+
+```
+    # r5: n count
+    # r6: x ptr
+    # r7: y ptr
+    # fp1: a mul-scalar
+  1 mtctr 5                 # move n to CTR
+  2 addi r10,r6,0           # copy y-ptr into r10 (y')
+  3 .L2
+  4 setvl MAXVL=32,VL=CTR   # actually VL=MIN(MAXVL,CTR)
+  5 sv.lfdup *32,8(6)       # load x into fp32-63, inc x
+  6 sv.lfdup *64,8(7)       # load y into fp64-95, inc y
+  7 sv.fmadd *64,*32,1,*64  # (*y) = (*x) * fp1 + (*y)
+  8 sv.stfdup *64,8(10)     # store at y-copy, inc y'
+  9 sv.bc/ctr .L2           # decrement CTR by VL
+ 10 blr                     # return
+```
+
+A refinement, saving one instruction and reducing register port usage.
+It relies on post-increment, on x and y not overlapping in memory,
+and critically on y being overwritten in place.
- # a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a (scalar)
- VBLK.REG[0] = {type: F, isvec: 1, regkey: a3, regidx: a3, elwidth: dflt}
- VBLK.REG[1] = {type: F, isvec: 1, regkey: a7, regidx: a7, elwidth: dflt}
+
+```
+    # r5: n count
+    # r6: x ptr
+    # r7: y ptr
+    # fp1: a mul-scalar
+  1 mtctr 5                 # move n to CTR
+  2 .L2
+  3 setvl MAXVL=32,VL=CTR   # actually VL=MIN(MAXVL,CTR)
+  4 sv.lfdup *32,8(6)       # load x into fp32-63, incr x
+  5 sv.lfd *64,8(7)         # load y into fp64-95, NO INC
+  6 sv.fmadd *64,*32,1,*64  # (*y) = (*x) * fp1 + (*y)
+  7 sv.stfdup *64,8(7)      # store at y, incr y
+  8 sv.bc/ctr .L2           # decrement CTR by VL
+  9 blr                     # return
+```
+
+# RVV version
+
+Summary: 13 instructions, 7 32-bit and 6 16-bit, for a total of 10 "words".
+
+```
+ # a0 is n, a1 is pointer to x[0], a2 is pointer to y[0], fa0 is a
+ li t0, 2<<25
+ vsetdcfg t0            # enable 2 64b Fl.Pt. registers
 loop:
- VBLK.SETVL t0, a0, #4 # MVL=4, vl = t0 = min(a0, MVL))
- ld a3, a1 # load 4 registers a3-6 from x
- c.slli t1, t0, 3 # t1 = vl * 8 (in bytes)
- c.ld a7, a2 # load 4 registers a7-10 from y
- c.add a1, a1, t1 # increment pointer to x by vl*8
- fmadd a7, a3, fa0, a7 # v1 += v0 * fa0 (y = a * x + y)
- c.sub a0, a0, t0 # n -= vl (t0)
- c.st a7, a2 # store 4 registers a7-10 to y
- c.add a2, a2, t1 # increment pointer to y by vl*8
- c.bnez a0, loop # repeat if n != 0
+ setvl t0, a0           # vl = t0 = min(mvl, n)
+ vld v0, a1             # load vector x
+ c.slli t1, t0, 3       # t1 = vl * 8 (in bytes)
+ vld v1, a2             # load vector y
+ c.add a1, a1, t1       # increment pointer to x by vl*8
+ vfmadd v1, v0, fa0, v1 # v1 += v0 * fa0 (y = a * x + y)
+ c.sub a0, a0, t0       # n -= vl (t0)
+ vst v1, a2             # store Y
+ c.add a2, a2, t1       # increment pointer to y by vl*8
+ c.bnez a0, loop        # repeat if n != 0
+ c.ret                  # return
+```
+
+# SVE Version
+
+```
+ 1 // x0 = &x[0], x1 = &y[0], x2 = &a, x3 = &n
+ 2 daxpy_:
+ 3 ldrsw x3, [x3]        // x3=*n
+ 4 mov x4, #0            // x4=i=0
+ 5 whilelt p0.d, x4, x3  // p0=while(i++