<!-- This defines shifted-index variants of instructions described in PowerISA Version 3.0 B Book 1 -->

<!-- This defines instructions that load from RAM to a register -->

<!-- Note that these pages also define equivalent store instructions; -->
<!-- these are described in fixedstore.mdwn -->

<!-- Section 3.3.2 Fixed-Point Load Instructions pages 47 - 53 -->
<!-- Section 3.3.3 Fixed-Point Store Instructions pages 54 - 56 -->
<!-- Section 3.3.3.1 64-bit Fixed-Point Store Instructions page 57 -->
<!-- Section 3.3.4 Fixed-Point Load and Store Quadword Instructions pages 58 - 59 -->
<!-- Section 3.3.5 Fixed-Point Load and Store with Byte Reversal Instructions page 60 -->
<!-- Section 3.3.5.1 64-Bit Load and Store with Byte Reversal Instructions page 61 -->
<!-- Section 3.3.6 Fixed-Point Load and Store Multiple Instructions page 62 -->


<!-- Section 3.3.2 Fixed-Point Load Instructions pages 47 - 53 -->

<!-- The byte, halfword, word, or doubleword in storage addressed by EA is loaded -->
<!-- into register RT. -->

<!-- Many of the Load instructions have an “update” form, in which register RA is -->
<!-- updated with the effective address. For these forms, if RA!=0 and RA!=RT, the -->
<!-- effective address is placed into register RA and the storage element (byte, -->
<!-- halfword, word, or doubleword) addressed by EA is loaded into RT. -->

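As an illustration only (not the project's simulator code; the helper name
`effective_address` is invented here), the Python sketch below models the
shifted-index effective-address calculation shared by every instruction on
this page, assuming XLEN=64:

    # Illustrative model of the shifted-indexed effective-address calculation
    # used by every load on this page:  EA <- (RA|0) + ((RB) << (SH+1))
    # "(RA|0)" means: if the RA field is register 0, the base is the literal
    # value 0 rather than the contents of r0.  The "with Update" forms use
    # (RA) directly instead.  XLEN=64 is assumed.

    XLEN = 64
    MASK = (1 << XLEN) - 1

    def effective_address(ra_field, ra_value, rb_value, sh):
        b = 0 if ra_field == 0 else ra_value        # (RA|0)
        return (b + ((rb_value & MASK) << (sh + 1))) & MASK

    # base 0x1000, index 3, SH=1 -> scale 1<<2 = 4 (word-sized elements)
    assert effective_address(5, 0x1000, 3, 1) == 0x100C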

# Load Byte and Zero Shifted Indexed

X-Form

* lbzsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-8)) || MEM(EA, 1)

Special Registers Altered:

    None

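A minimal sketch, purely illustrative, of the zero-extension performed by the
RT assignment above (the helper name `zero_extend` is invented here, and
applies equally to the 2- and 4-byte "and Zero" forms later on this page):

    # Zero-extension of an n-byte loaded value to XLEN bits, as in
    # RT <- ([0] * (XLEN-8)) || MEM(EA, 1).  Illustrative only.

    def zero_extend(value, nbytes, xlen=64):
        assert 0 <= value < (1 << (8 * nbytes))
        return value & ((1 << xlen) - 1)     # upper bits are already zero

    assert zero_extend(0xFF, 1) == 0x00000000000000FF
    assert zero_extend(0x8000, 2) == 0x0000000000008000
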
# Load Byte and Zero Shifted with Update Indexed

X-Form

* lbzsux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-8)) || MEM(EA, 1)
    RA <- EA

Special Registers Altered:

    None

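The update form writes the computed EA back into RA after the load. A rough
Python sketch (registers and memory are plain dicts here, and the helper name
`lbzsux_model` is invented purely for illustration):

    # Sketch of the "with Update" behaviour: the computed EA is written back
    # to RA after the load.  Per the note at the top of this page the form
    # requires RA!=0 and RA!=RT.

    def lbzsux_model(regs, mem, rt, ra, rb, sh):
        assert ra != 0 and ra != rt, "invalid form"
        ea = (regs[ra] + (regs[rb] << (sh + 1))) & ((1 << 64) - 1)
        regs[rt] = mem[ea]      # zero-extended byte load
        regs[ra] = ea           # update: base register receives EA
        return regs

    regs = {1: 0x10, 2: 0x1, 3: 0}
    mem = {0x14: 0xAB}          # 0x10 + (0x1 << 2) = 0x14  (SH=1)
    lbzsux_model(regs, mem, rt=3, ra=1, rb=2, sh=1)
    assert regs[3] == 0xAB and regs[1] == 0x14
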
# Load Halfword and Zero Shifted Indexed

X-Form

* lhzsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-16)) || MEM(EA, 2)

Special Registers Altered:

    None

# Load Halfword and Zero Shifted with Update Indexed

X-Form

* lhzsux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-16)) || MEM(EA, 2)
    RA <- EA

Special Registers Altered:

    None

# Load Halfword Algebraic Shifted Indexed

X-Form

* lhasx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- EXTS(MEM(EA, 2))

Special Registers Altered:

    None

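For the Algebraic forms, EXTS sign-extends the loaded value to XLEN bits. A
minimal sketch of that sign extension, assuming two's-complement
representation (the helper name `exts` mirrors the pseudo-code function but
is a local illustration, not the simulator's implementation):

    # EXTS: sign-extension of the loaded halfword (or word) to XLEN bits,
    # as used by the "Algebraic" loads.  Illustrative only.

    def exts(value, width, xlen=64):
        sign = 1 << (width - 1)
        value &= (1 << width) - 1
        return (value - (1 << width)) % (1 << xlen) if value & sign else value

    assert exts(0x8000, 16) == 0xFFFFFFFFFFFF8000     # negative halfword
    assert exts(0x7FFF, 16) == 0x7FFF                 # positive halfword
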
# Load Halfword Algebraic Shifted with Update Indexed

X-Form

* lhasux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- EXTS(MEM(EA, 2))
    RA <- EA

Special Registers Altered:

    None

# Load Word and Zero Shifted Indexed

X-Form

* lwzsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-32)) || MEM(EA, 4)

Special Registers Altered:

    None

# Load Word and Zero Shifted with Update Indexed

X-Form

* lwzsux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- ([0] * (XLEN-32)) || MEM(EA, 4)
    RA <- EA

Special Registers Altered:

    None

# Load Word Algebraic Shifted Indexed

X-Form

* lwasx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- EXTS(MEM(EA, 4))

Special Registers Altered:

    None

# Load Word Algebraic Shifted with Update Indexed

X-Form

* lwasux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- EXTS(MEM(EA, 4))
    RA <- EA

Special Registers Altered:

    None

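A worked example of why the index is scaled by 1 << (SH+1): with SH=1 the
scale is 4, which matches stepping through a table of 32-bit elements, as
`lwasx` would. The sketch below is illustrative Python only (the helper name
`lwasx_model` and the little-endian packing are choices made just for the
example):

    # lwasx RT,RA,RB,SH with SH=1 scales the index by 1 << (SH+1) = 4,
    # which matches indexing an array of 32-bit elements.

    import struct

    table = struct.pack("<4i", 10, -20, 30, -40)   # int32_t table[4]

    def lwasx_model(mem, base, index, sh=1):
        ea = base + (index << (sh + 1))
        return struct.unpack_from("<i", mem, ea)[0]  # sign-extended word

    assert [lwasx_model(table, 0, i) for i in range(4)] == [10, -20, 30, -40]
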
# Load Doubleword Shifted Indexed

X-Form

* ldsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    RT <- MEM(EA, 8)

Special Registers Altered:

    None

# Load Doubleword Shifted with Update Indexed

X-Form

* ldsux RT,RA,RB,SH

Pseudo-code:

    EA <- (RA) + ((RB) << (SH+1))
    RT <- MEM(EA, 8)
    RA <- EA

Special Registers Altered:

    None

<!-- byte-reverse shifted -->

# Load Halfword Byte-Reverse Shifted Indexed

X-Form

* lhbrsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    load_data <- MEM(EA, 2)
    RT <- ([0] * (XLEN-16)) || load_data[8:15] || load_data[0:7]

Special Registers Altered:

    None

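A minimal sketch of the halfword byte swap described by the RT assignment
above. Power ISA bit numbering is MSB0, so load_data[0:7] is the high byte of
the value returned by MEM(EA, 2); the helper name `lhbr_swap` is invented
here:

    # Byte swap of the halfword value returned by MEM(EA, 2):
    #     RT <- ([0] * (XLEN-16)) || load_data[8:15] || load_data[0:7]

    def lhbr_swap(load_data):
        hi = (load_data >> 8) & 0xFF        # load_data[0:7]
        lo = load_data & 0xFF               # load_data[8:15]
        return (lo << 8) | hi               # lo || hi, zero-extended

    assert lhbr_swap(0x1234) == 0x3412
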
# Load Word Byte-Reverse Shifted Indexed

X-Form

* lwbrsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    load_data <- MEM(EA, 4)
    RT <- (([0] * (XLEN-32)) || load_data[24:31] || load_data[16:23]
           || load_data[8:15] || load_data[0:7])

Special Registers Altered:

    None

<!-- Section 3.3.5.1 64-Bit Load and Store with Byte Reversal Instructions page 61 -->

# Load Doubleword Byte-Reverse Shifted Indexed

X-Form

* ldbrsx RT,RA,RB,SH

Pseudo-code:

    b <- (RA|0)
    EA <- b + ((RB) << (SH+1))
    load_data <- MEM(EA, 8)
    RT <- (load_data[56:63] || load_data[48:55]
           || load_data[40:47] || load_data[32:39]
           || load_data[24:31] || load_data[16:23]
           || load_data[8:15] || load_data[0:7])

Special Registers Altered:

    None

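The word and doubleword byte-reverse forms follow the same pattern over 4 and
8 bytes. A minimal sketch for the doubleword case, treating load_data as the
eight bytes returned by MEM(EA, 8) in order (the helper name `ldbr_model` is
invented here):

    # Byte reversal over 8 bytes, as in the RT assignment above.  Reversing
    # the byte string and reading it big-endian gives the same result as
    # reading the original byte string little-endian.  Illustrative only.

    def ldbr_model(load_data):
        assert len(load_data) == 8
        return int.from_bytes(load_data[::-1], "big")

    data = bytes([0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
    assert ldbr_model(data) == 0x0807060504030201
    assert ldbr_model(data) == int.from_bytes(data, "little")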