1 # [DRAFT] Multiply and Add Extended Doubleword
<!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
<!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
<!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->
12 prod[0:127] <- (RA) * (RB)
13 sum[0:127] <- ([0]*64 || (RC)) + prod
17 Special Registers Altered:
21 # [DRAFT] Divide/Modulo Double-width Doubleword Unsigned
25 * divmod2du RT,RA,RB,RC
29 <!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
30 <!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
31 <!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->
32 if ((RC) <u (RB)) & ((RB) != [0]*XLEN) then
33 dividend[0:(XLEN*2)-1] <- (RC) || (RA)
34 divisor[0:(XLEN*2)-1] <- [0]*XLEN || (RB)
35 result <- dividend / divisor
36 modulo <- dividend % divisor
37 RT <- result[XLEN:(XLEN*2)-1]
38 RS <- modulo[XLEN:(XLEN*2)-1]
45 Special Registers Altered:
49 # [DRAFT] Double-width Shift Left Doubleword
53 * dsld RT,RA,RB,RC (Rc=0)
54 * dsld. RT,RA,RB,RC (Rc=1)
59 v <- ROTL128([0]*64 || (RA), n)
60 RT <- v[64:127] | ((RC) & MASK(n, 63))
63 Special Registers Altered:
67 # [DRAFT] Double-width Shift Right Doubleword
71 * dsrd RT,RA,RB,RC (Rc=0)
72 * dsrd. RT,RA,RB,RC (Rc=1)
80 mask[0:63] <- MASK(0, 63 - n)
81 v[0:63] <- (hi & ¬mask) | (lo & mask)
82 RT <- ROTL64(v, 64 - n)
84 Special Registers Altered: