+# c code
+
```
- # c code
- void daxpy(size_t n, double a, const double x[], double y[])
- {
- for (size_t i = 0; i < n; i++) {
- y[i] = a*x[i] + y[i];
- }
+ void daxpy(size_t n, double a, const double x[], double y[]) {
+ for (size_t i = 0; i < n; i++)
+ y[i] = a*x[i] + y[i];
}
+```
-
- # SVP64 Power ISA version
- # r5: n
- # r5: x
- # r6: y
- # fp1: a
- mtctr 5 # move n to CTR
- addi r10,r6,0 # copy y-ptr into r10
-.L2
- setvl MAXVL=32,VL=CTR # could do more
- sv.lfdup/els *32,8(6) # load from x
- sv.lfdup/els *64,8(7) # load from y
- sv.fmadd *64,*64,1,*32 # fmadd
- stfdup/els *64,8(10) # store y-copy
- sv.bc/ctr .L2 # decrement VL by CTR
- blr # return
-
- # SV Version
- # a0 is n, a1 is ptr to x[0], a2 is ptr to y[0], fa0 is a (scalar)
- VBLK.REG[0] = {type: F, isvec: 1, regkey: a3, regidx: a3, elwidth: dflt}
- VBLK.REG[1] = {type: F, isvec: 1, regkey: a7, regidx: a7, elwidth: dflt}
- loop:
- VBLK.SETVL t0, a0, #4 # MVL=4, vl = t0 = min(a0, MVL))
- c.ld a3, a1 # load 4 registers a3-6 from x
- c.slli t1, t0, 3 # t1 = vl * 8 (in bytes: FP is double)
- c.ld a7, a2 # load 4 registers a7-10 from y
- c.add a1, a1, t1 # increment pointer to x by vl*8
- fmadd a7, a3, fa0, a7 # v1 += v0 * fa0 (y = a * x + y)
- c.sub a0, a0, t0 # n -= vl (t0)
- c.st a7, a2 # store 4 registers a7-10 to y
- c.add a2, a2, t1 # increment pointer to y by vl*8
- c.bnez a0, loop # repeat if n != 0
- c.ret # return
-
- # RVV version
+Summary
+
+| ISA | total | loop | words | notes |
+|-----|-------|------|-------|-------|
+| SVP64 | 9 | 7 | 14 | 5 64-bit, 4 32-bit |
+| RVV | 13 | 11 | 9.5 | 7 32-bit, 5 16-bit |
+| SVE | 12 | 7 | 12 | all 32-bit |
+
+# SVP64 Power ISA version
+
+Relies on post-increment, relies on no overlap between x and y
+in memory, and critically relies on y overwrite. x is post-incremented
+when read, but y is post-incremented on write. Element-Strided
+ensures the Immediate (8) results in a contiguous LD (or store)
+despite RA being marked Scalar (*without* modifying RA, on `sv.lfd/els`).
+For `sv.lfdup`, RA is Scalar so that only one
+LD/ST Update "wins": the last write to RA is the address for
+the next block.
+
+```
+ # r5: n count; r6: x ptr; r7: y ptr; fp1: a
+ 1 addi r3,r7,0 # return result
+ 2 mtctr 5 # move n to CTR
+ 3 .L2
+ 4 setvl MAXVL=32,VL=CTR # actually VL=MIN(MAXVL,CTR)
+ 5 sv.lfdup/els *32,8(6) # load x into fp32-63, incr x
+ 6 sv.lfd/els *64,8(7) # load y into fp64-95, NO INC
+ 7 sv.fmadd *64,*32,1,*64  # (*y) = (*x) * a + (*y)
+ 8 sv.stfdup/els *64,8(7) # store at y, incr y
+ 9 sv.bc/ctr .L2 # decr CTR by VL, jump !zero
+ 10 blr # return
+```
+
+# RVV version
+
+
+```
# a0 is n, a1 is pointer to x[0], a2 is pointer to y[0], fa0 is a
li t0, 2<<25
vsetdcfg t0 # enable 2 64b Fl.Pt. registers
c.bnez a0, loop # repeat if n != 0
c.ret # return
```
+
+# SVE Version
+
+```
+ 1 // x0 = &x[0], x1 = &y[0], x2 = &a, x3 = &n
+ 2 daxpy_:
+ 3 ldrsw x3, [x3] // x3=*n
+ 4 mov x4, #0 // x4=i=0
+ 5 whilelt p0.d, x4, x3 // p0=while(i++<n)
+ 6 ld1rd z0.d, p0/z, [x2] // p0:z0=bcast(*a)
+ 7 .loop:
+ 8 ld1d z1.d, p0/z, [x0, x4, lsl #3] // p0:z1=x[i]
+ 9 ld1d z2.d, p0/z, [x1, x4, lsl #3] // p0:z2=y[i]
+ 10 fmla z2.d, p0/m, z1.d, z0.d // p0?z2+=x[i]*a
+ 11 st1d z2.d, p0, [x1, x4, lsl #3] // p0?y[i]=z2
+ 12 incd x4 // i+=(VL/64)
+ 13 .latch:
+ 14 whilelt p0.d, x4, x3 // p0=while(i++<n)
+ 15 b.first .loop // more to do?
+ 16 ret
+```