4 void daxpy(size_t n, double a, const double x[], double y[])
6 for (size_t i = 0; i < n; i++) {
12 # SVP64 Power ISA version
19 1 mtctr 5 # move n to CTR
20 2 addi r10,r7,0 # copy y-ptr (r7) into r10 (y')
22 4 setvl MAXVL=32,VL=CTR # actually VL=MIN(MAXVL,CTR)
23 5 sv.lfdup *32,8(6) # load x into fp32-63, inc x
24 6 sv.lfdup *64,8(7) # load y into fp64-95, inc y
25 7 sv.fmadd *64,*32,1,*64 # (*y) = (*x) * fp1 + (*y)
26 8 sv.stfdup *64,8(10) # store at y-copy, inc y'
27 9 sv.bc/ctr .L2 # decrement CTR by VL, branch back if CTR != 0
31 A refinement, reducing 1 instruction and register port usage.
32 Relies on post-increment, relies on no overlap between x and y
33 in memory, and critically relies on y overwrite.
40 1 mtctr 5 # move n to CTR
42 3 setvl MAXVL=32,VL=CTR # actually VL=MIN(MAXVL,CTR)
43 4 sv.lfdup *32,8(6) # load x into fp32-63, incr x
44 5 sv.lfd *64,8(7) # load y into fp64-95, NO INC
45 6 sv.fmadd *64,*32,1,*64 # (*y) = (*x) * fp1 + (*y)
46 7 sv.stfdup *64,8(7) # store at y, incr y
47 8 sv.bc/ctr .L2 # decrement CTR by VL, branch back if CTR != 0
54 # a0 is n, a1 is pointer to x[0], a2 is pointer to y[0], fa0 is a
56 vsetdcfg t0 # enable 2 64b Fl.Pt. registers
58 setvl t0, a0 # vl = t0 = min(mvl, n)
59 vld v0, a1 # load vector x
60 c.slli t1, t0, 3 # t1 = vl * 8 (in bytes)
61 vld v1, a2 # load vector y
62 c.add a1, a1, t1 # increment pointer to x by vl*8
63 vfmadd v1, v0, fa0, v1 # v1 += v0 * fa0 (y = a * x + y)
64 c.sub a0, a0, t0 # n -= vl (t0)
66 c.add a2, a2, t1 # increment pointer to y by vl*8
67 c.bnez a0, loop # repeat if n != 0
74 1 // x0 = &x[0], x1 = &y[0], x2 = &a, x3 = &n
76 3 ldrsw x3, [x3] // x3=*n
78 5 whilelt p0.d, x4, x3 // p0=while(i++<n)
79 6 ld1rd z0.d, p0/z, [x2] // p0:z0=bcast(*a)
81 8 ld1d z1.d, p0/z, [x0, x4, lsl #3] // p0:z1=x[i]
82 9 ld1d z2.d, p0/z, [x1, x4, lsl #3] // p0:z2=y[i]
83 10 fmla z2.d, p0/m, z1.d, z0.d // p0?z2+=x[i]*a
84 11 st1d z2.d, p0, [x1, x4, lsl #3] // p0?y[i]=z2
85 12 incd x4 // i+=(VL/64)
87 14 whilelt p0.d, x4, x3 // p0=while(i++<n)
88 15 b.first .loop // more to do?