<!-- The byte (or halfword, word, or doubleword) addressed by EA is loaded into RT. -->
-# Load Byte and Zero Shifted Indexed
+# Load Byte and Zero Shifted Indexed
X-Form
-* lbzsx RT,RA,RB,sm
+* lbzsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB) << (sm+1)
+ EA <- b + (RB) << (SH+1)
RT <- ([0] * (XLEN-8)) || MEM(EA, 1)
+Description:
+
+ Let the effective address (EA) be the sum of (RA|0) and the
+ contents of register RB shifted left by (SH+1) bits.
+ The byte in storage addressed by EA is loaded into
+ RT[56:63]. RT[0:55] are set to 0.
+
+
Special Registers Altered:
None
X-Form
-* lbzsux RT,RA,RB,sm
+* lbzsux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB) << (sm+1)
+ EA <- (RA) + (RB) << (SH+1)
RT <- ([0] * (XLEN-8)) || MEM(EA, 1)
RA <- EA
X-Form
-* lhzsx RT,RA,RB,sm
+* lhzsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB) << (sm+1)
+ EA <- b + (RB) << (SH+1)
RT <- ([0] * (XLEN-16)) || MEM(EA, 2)
Special Registers Altered:
X-Form
-* lhzsux RT,RA,RB,sm
+* lhzsux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB) << (sm+1)
+ EA <- (RA) + (RB) << (SH+1)
RT <- ([0] * (XLEN-16)) || MEM(EA, 2)
RA <- EA
None
-# Load Halfword Algebraic Indexed
+# Load Halfword Algebraic Shifted Indexed
X-Form
-* lhax RT,RA,RB
+* lhasx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
RT <- EXTS(MEM(EA, 2))
Special Registers Altered:
None
-# Load Halfword Algebraic with Update Indexed
+# Load Halfword Algebraic Shifted with Update Indexed
X-Form
-* lhaux RT,RA,RB
+* lhasux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB)
+ EA <- (RA) + (RB) << (SH+1)
RT <- EXTS(MEM(EA, 2))
RA <- EA
None
-# Load Word and Zero Indexed
+# Load Word and Zero Shifted Indexed
X-Form
-* lwzx RT,RA,RB
+* lwzsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
RT <- [0] * 32 || MEM(EA, 4)
Special Registers Altered:
None
-# Load Word and Zero with Update Indexed
+# Load Word and Zero Shifted with Update Indexed
X-Form
-* lwzux RT,RA,RB
+* lwzsux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB)
+ EA <- (RA) + (RB) << (SH+1)
RT <- [0] * 32 || MEM(EA, 4)
RA <- EA
None
-# Load Word Algebraic Indexed
+# Load Word Algebraic Shifted Indexed
X-Form
-* lwax RT,RA,RB
+* lwasx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
RT <- EXTS(MEM(EA, 4))
Special Registers Altered:
None
-# Load Word Algebraic with Update Indexed
+# Load Word Algebraic Shifted with Update Indexed
X-Form
-* lwaux RT,RA,RB
+* lwasux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB)
+ EA <- (RA) + (RB) << (SH+1)
RT <- EXTS(MEM(EA, 4))
RA <- EA
None
-# Load Doubleword Indexed
+# Load Doubleword Shifted Indexed
X-Form
-* ldx RT,RA,RB
+* ldsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
RT <- MEM(EA, 8)
Special Registers Altered:
None
-# Load Doubleword with Update Indexed
+# Load Doubleword Shifted with Update Indexed
X-Form
-* ldux RT,RA,RB
+* ldsux RT,RA,RB,SH
Pseudo-code:
- EA <- (RA) + (RB)
+ EA <- (RA) + (RB) << (SH+1)
RT <- MEM(EA, 8)
RA <- EA
<!-- byte-reverse shifted -->
-# Load Halfword Byte-Reverse Indexed
+# Load Halfword Byte-Reverse Shifted Indexed
X-Form
-* lhbrx RT,RA,RB
+* lhbrsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
load_data <- MEM(EA, 2)
RT <- [0]*48 || load_data[8:15] || load_data[0:7]
None
-# Load Word Byte-Reverse Indexed
+# Load Word Byte-Reverse Shifted Indexed
X-Form
-* lwbrx RT,RA,RB
+* lwbrsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
load_data <- MEM(EA, 4)
RT <- ([0] * 32 || load_data[24:31] || load_data[16:23]
|| load_data[8:15] || load_data[0:7])
<!-- Section 3.3.5.1 64-Bit Load and Store with Byte Reversal Instructions page 61 -->
-# Load Doubleword Byte-Reverse Indexed
+# Load Doubleword Byte-Reverse Shifted Indexed
X-Form
-* ldbrx RT,RA,RB
+* ldbrsx RT,RA,RB,SH
Pseudo-code:
b <- (RA|0)
- EA <- b + (RB)
+ EA <- b + (RB) << (SH+1)
load_data <- MEM(EA, 8)
RT <- (load_data[56:63] || load_data[48:55]
|| load_data[40:47] || load_data[32:39]