# [DRAFT] Multiply and Add Extended Doubleword

VA-Form

* maddedu RT,RA,RB,RC

Pseudo-code:

<!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
<!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
<!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->

    prod[0:127] <- (RA) * (RB)
    sum[0:127] <- ([0]*64 || (RC)) + prod
    RT <- sum[64:127]
    RS <- sum[0:63]

Special Registers Altered:

    None

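The carry-chaining behaviour of maddedu is easier to see in an executable
model. The sketch below is a minimal Python rendering of the pseudo-code
above, not part of the specification: the function name and the plain
`(RT, RS)` return value are illustrative only, and the SVP64 EXTRA-based
selection of RS is ignored.

    MASK64 = (1 << 64) - 1

    def maddedu(ra, rb, rc):
        # prod[0:127] <- (RA) * (RB); sum <- prod + zero-extended (RC)
        s = ra * rb + rc        # (2^64-1)^2 + (2^64-1) < 2^128, so s fits in 128 bits
        rt = s & MASK64         # sum[64:127]: the low 64 bits of the result
        rs = s >> 64            # sum[0:63]: the high 64 bits, the carry-out word
        return rt, rs

Feeding RS of one element into RC of the next is how a vector of maddedu
operations is intended to be chained for big-integer multiplication.
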
# [DRAFT] Divide/Modulo Double-width Doubleword Unsigned

VA-Form

* divmod2du RT,RA,RB,RC

Pseudo-code:

<!-- SVP64: RA,RB,RC,RT have EXTRA2, RS as below -->
<!-- bit 8 of EXTRA is set : RS.[s|v]=RT.[s|v]+MAXVL -->
<!-- bit 8 of EXTRA is clear: RS.[s|v]=RC.[s|v] -->

    if ((RC) <u (RB)) & ((RB) != [0]*XLEN) then
        dividend[0:(XLEN*2)-1] <- (RC) || (RA)
        divisor[0:(XLEN*2)-1] <- [0]*XLEN || (RB)
        result <- dividend / divisor
        modulo <- dividend % divisor
        RT <- result[XLEN:(XLEN*2)-1]
        RS <- modulo[XLEN:(XLEN*2)-1]
        overflow <- 0
    else
        overflow <- 1
        RT <- [1]*XLEN
        RS <- [0]*XLEN

Special Registers Altered:

    None

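As a reading aid, here is a minimal Python model of divmod2du, again
illustrative only (the function name is invented and SVP64 remapping is
ignored). The `(RC) <u (RB)` guard is what guarantees that the
128-by-64-bit quotient fits back into 64 bits.

    MASK64 = (1 << 64) - 1

    def divmod2du(ra, rb, rc):
        if rc < rb and rb != 0:
            dividend = (rc << 64) | ra   # (RC) || (RA), RC is the high half
            rt = dividend // rb          # quotient: fits in 64 bits since RC <u RB
            rs = dividend % rb           # remainder: always less than RB
        else:
            rt = MASK64                  # overflow case: all ones
            rs = 0
        return rt, rs
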
# [DRAFT] Double-width Shift Left Doubleword

VA2-Form

* dsld RT,RA,RB,RC (Rc=0)
* dsld. RT,RA,RB,RC (Rc=1)

Pseudo-code:

    n <- (RB)[58:63]
    v <- ROTL64((RA), n)
    mask <- MASK(64, 63-n)
    RT <- (v[0:63] & mask) | ((RC) & ¬mask)
    RS <- v[0:63] & ¬mask

Special Registers Altered:

    CR0 (if Rc=1)

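A hedged Python sketch of dsld (Rc=0 behaviour only; CR0 update and SVP64
remapping are omitted, and the function name is illustrative). It assumes
MASK(64, 63-n) selects every bit except the low n, so the low n bits of RC
fill the space vacated by the left shift and RS collects the n bits shifted
out of RA.

    MASK64 = (1 << 64) - 1

    def dsld(ra, rb, rc):
        n = rb & 0x3f                                 # n <- (RB)[58:63]
        v = ((ra << n) | (ra >> (64 - n))) & MASK64   # ROTL64((RA), n)
        mask = MASK64 ^ ((1 << n) - 1)                # all bits except the low n
        rt = (v & mask) | (rc & (MASK64 ^ mask))      # RA shifted left, RC fills the bottom
        rs = v & (MASK64 ^ mask)                      # bits shifted out, ready for the next RC
        return rt, rs
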
# [DRAFT] Double-width Shift Right Doubleword

VA2-Form

* dsrd RT,RA,RB,RC (Rc=0)
* dsrd. RT,RA,RB,RC (Rc=1)

Pseudo-code:

    n <- (RB)[58:63]
    v <- ROTL128((RA) || [0]*64, 128-n)
    mask <- ¬MASK(n, 63)
    RT <- v[0:63] | ((RC) & mask)
    RS <- v[64:127]

Special Registers Altered:

    CR0 (if Rc=1)
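
For symmetry, a comparable Python sketch of dsrd (Rc=0 behaviour only,
function name illustrative, CR0 and SVP64 remapping omitted). Note that the
carry convention is mirrored: the bits shifted out of RA land in the *top*
of RS, and the incoming carry is taken from the top n bits of RC.

    MASK64 = (1 << 64) - 1

    def dsrd(ra, rb, rc):
        n = rb & 0x3f                        # n <- (RB)[58:63]
        v = ra << (64 - n)                   # ROTL128((RA) || [0]*64, 128-n): only zero bits wrap
        mask = ((1 << n) - 1) << (64 - n)    # ¬MASK(n, 63): the top n bits
        rt = (v >> 64) | (rc & mask)         # RA shifted right, top bits filled from RC
        rs = v & MASK64                      # shifted-out bits, held in the top of RS
        return rt, rs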