1 # [DRAFT] Multiply and Add Extended Doubleword
9 <!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
10 <!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
11 <!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->
12 prod[0:127] <- (RA) * (RB)
13 sum[0:127] <- ([0]*64 || (RC)) + prod
17 Special Registers Altered:
21 # [DRAFT] Divide/Modulo Double-width Doubleword Unsigned
25 * divmod2du RT,RA,RB,RC
29 <!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
30 <!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
31 <!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->
32 if ((RC) <u (RB)) & ((RB) != [0]*XLEN) then
33 dividend[0:(XLEN*2)-1] <- (RC) || (RA)
34 divisor[0:(XLEN*2)-1] <- [0]*XLEN || (RB)
35 result <- dividend / divisor
36 modulo <- dividend % divisor
37 RT <- result[XLEN:(XLEN*2)-1]
38 RS <- modulo[XLEN:(XLEN*2)-1]
45 Special Registers Altered:
49 # [DRAFT] Double-width Shift Left Doubleword
53 * dsld RT,RA,RB,RC (Rc=0)
54 * dsld. RT,RA,RB,RC (Rc=1)
59 v <- ROTL128([0]*64 || (RA), n)
60 mask <- ¬MASK(64, 63-n)
61 RT <- v[64:127] | ((RC) & mask)
64 Special Registers Altered:
68 # [DRAFT] Double-width Shift Right Doubleword
72 * dsrd RT,RA,RB,RC (Rc=0)
73 * dsrd. RT,RA,RB,RC (Rc=1)
78 v <- ROTL128((RA) || [0]*64, 128-n)
80 RT <- v[0:63] | ((RC) & mask)
83 Special Registers Altered: