{ 12, 20, S390_OPERAND_DISP },
#define D_36 25 /* Displacement starting at position 36 */
{ 12, 36, S390_OPERAND_DISP },
+#define D20_20 26 /* 20 bit displacement starting at 20 */
+ { 20, 20, S390_OPERAND_DISP|S390_OPERAND_SIGNED },
-#define L4_8 26 /* 4 bit length starting at position 8 */
+#define L4_8 27 /* 4 bit length starting at position 8 */
{ 4, 8, S390_OPERAND_LENGTH },
-#define L4_12 27 /* 4 bit length starting at position 12 */
+#define L4_12 28 /* 4 bit length starting at position 12 */
{ 4, 12, S390_OPERAND_LENGTH },
-#define L8_8 28 /* 8 bit length starting at position 8 */
+#define L8_8 29 /* 8 bit length starting at position 8 */
{ 8, 8, S390_OPERAND_LENGTH },
-#define U4_8 29 /* 4 bit unsigned value starting at 8 */
+#define U4_8 30 /* 4 bit unsigned value starting at 8 */
{ 4, 8, 0 },
-#define U4_12 30 /* 4 bit unsigned value starting at 12 */
+#define U4_12 31 /* 4 bit unsigned value starting at 12 */
{ 4, 12, 0 },
-#define U4_16 31 /* 4 bit unsigned value starting at 16 */
+#define U4_16 32 /* 4 bit unsigned value starting at 16 */
{ 4, 16, 0 },
-#define U4_20 32 /* 4 bit unsigned value starting at 20 */
+#define U4_20 33 /* 4 bit unsigned value starting at 20 */
{ 4, 20, 0 },
-#define U8_8 33 /* 8 bit unsigned value starting at 8 */
+#define U8_8 34 /* 8 bit unsigned value starting at 8 */
{ 8, 8, 0 },
-#define U8_16 34 /* 8 bit unsigned value starting at 16 */
+#define U8_16 35 /* 8 bit unsigned value starting at 16 */
{ 8, 16, 0 },
-#define I16_16 35 /* 16 bit signed value starting at 16 */
+#define I16_16 36 /* 16 bit signed value starting at 16 */
{ 16, 16, S390_OPERAND_SIGNED },
-#define U16_16 36 /* 16 bit unsigned value starting at 16 */
+#define U16_16 37 /* 16 bit unsigned value starting at 16 */
{ 16, 16, 0 },
-#define J16_16 37 /* PC relative jump offset at 16 */
+#define J16_16 38 /* PC relative jump offset at 16 */
{ 16, 16, S390_OPERAND_PCREL },
-#define J32_16 38 /* PC relative long offset at 16 */
+#define J32_16 39 /* PC relative long offset at 16 */
{ 32, 16, S390_OPERAND_PCREL }
};
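
For orientation, each entry in the table above is an s390_operand triple { bits, shift, flags }: the field width in bits, its bit offset from the start of the instruction, and S390_OPERAND_* property flags. A minimal extraction sketch, assuming the instruction is left-aligned in a zero-padded 6-byte big-endian buffer; extract_field is a made-up helper and the flag value shown is illustrative (the real definitions live in include/opcode/s390.h):

#include <stdint.h>

/* Illustrative copy; the real flag values live in
   include/opcode/s390.h.  */
#define S390_OPERAND_SIGNED 0x8

struct s390_operand
{
  int bits;             /* size of the field in bits */
  int shift;            /* bit offset from the start of the instruction */
  unsigned long flags;  /* S390_OPERAND_* properties */
};

/* Pull one operand field out of a zero-padded 6-byte, big-endian
   instruction image and sign-extend it if the operand is signed.
   Note that the 20-bit displacement (D20_20) additionally needs its
   DL/DH halves rearranged; see the encoding sketch further down.  */
static int32_t
extract_field (const unsigned char *insn, const struct s390_operand *op)
{
  uint64_t val = 0;
  int i;

  for (i = 0; i < 6; i++)
    val = (val << 8) | insn[i];
  val >>= 48 - (op->shift + op->bits);
  val &= ((uint64_t) 1 << op->bits) - 1;

  if ((op->flags & S390_OPERAND_SIGNED)
      && (val & ((uint64_t) 1 << (op->bits - 1))))
    val -= (uint64_t) 1 << op->bits;
  return (int32_t) val;
}
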
#define INSTR_RRF_F0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. madbr */
#define INSTR_RRF_FUFF 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. didbr */
#define INSTR_RRF_RURR 4, { R_24,R_28,R_16,U4_20,0,0 } /* e.g. .insn */
+#define INSTR_RRF_R0RR 4, { R_24,R_28,R_16,0,0,0 } /* e.g. idte */
#define INSTR_RRF_U0FF 4, { F_24,U4_16,F_28,0,0,0 } /* e.g. fidbr */
#define INSTR_RRF_U0RF 4, { R_24,U4_16,F_28,0,0,0 } /* e.g. cfebr */
#define INSTR_RR_UR 2, { U4_8,R_12,0,0,0,0 } /* e.g. bcr */
#define INSTR_RSE_RRRD 6, { R_8,R_12,D_20,B_16,0,0 } /* e.g. lmh */
#define INSTR_RSE_RURD 6, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icmh */
+#define INSTR_RSL_R0RD 6, { D_20,L4_8,B_16,0,0,0 } /* e.g. tp */
#define INSTR_RSI_RRP 4, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxh */
+#define INSTR_RSY_RRRD 6, { R_8,R_12,D20_20,B_16,0,0 } /* e.g. stmy */
+#define INSTR_RSY_RURD 6, { R_8,U4_12,D20_20,B_16,0,0 } /* e.g. icmh */
+#define INSTR_RSY_AARD 6, { A_8,A_12,D20_20,B_16,0,0 } /* e.g. lamy */
#define INSTR_RS_AARD 4, { A_8,A_12,D_20,B_16,0,0 } /* e.g. lam */
#define INSTR_RS_CCRD 4, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lctl */
#define INSTR_RS_R0RD 4, { R_8,D_20,B_16,0,0,0 } /* e.g. sll */
#define INSTR_RXE_RRRD 6, { R_8,D_20,X_12,B_16,0,0 } /* e.g. lg */
#define INSTR_RXF_FRRDF 6, { F_32,F_8,D_20,X_12,B_16,0 } /* e.g. madb */
#define INSTR_RXF_RRRDR 6, { R_32,R_8,D_20,X_12,B_16,0 } /* e.g. .insn */
+#define INSTR_RXY_RRRD 6, { R_8,D20_20,X_12,B_16,0,0 } /* e.g. ly */
+#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */
#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */
#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */
#define INSTR_RX_RRRD 4, { R_8,D_20,X_12,B_16,0,0 } /* e.g. l */
#define INSTR_RX_URRD 4, { U4_8,D_20,X_12,B_16,0,0 } /* e.g. bc */
#define INSTR_SI_URD 4, { D_20,B_16,U8_8,0,0,0 } /* e.g. cli */
+#define INSTR_SIY_URD 6, { D20_20,B_16,U8_8,0,0,0 } /* e.g. tmy */
#define INSTR_SSE_RDRD 6, { D_20,B_16,D_36,B_32,0,0 } /* e.g. mvsdk */
#define INSTR_SS_L0RDRD 6, { D_20,L8_8,B_16,D_36,B_32,0 } /* e.g. mvc */
#define INSTR_SS_LIRDRD 6, { D_20,L4_8,B_16,D_36,B_32,U4_12 } /* e.g. srp */
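
Likewise, each INSTR_* macro above expands to the instruction length in bytes followed by up to six indices into the operand table, listed in assembler operand order and 0-terminated (so index 0 is never a usable operand). A hypothetical walker on top of the extract_field sketch above; s390_operands stands for the table built from the triples shown earlier:

#include <stdio.h>

/* The operand table built from the { bits, shift, flags } triples
   above; the walker itself is only a sketch.  */
extern const struct s390_operand s390_operands[];

static void
print_operands (const unsigned char *insn, const unsigned char operands[6])
{
  int i;

  for (i = 0; i < 6 && operands[i] != 0; i++)
    printf ("%s%d", i == 0 ? "\t" : ",",
            (int) extract_field (insn, &s390_operands[operands[i]]));
  putchar ('\n');
}
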
#define MASK_RRF_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_FUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RRF_RURR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RRF_R0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_U0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RRF_U0RF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 }
#define MASK_RR_UR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RSE_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSE_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RSL_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff }
#define MASK_RSI_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RS_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_RSY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RSY_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RSY_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXE_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXE_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXF_FRRDF { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RXF_RRRDR { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RXY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
+#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_RX_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SI_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define MASK_SIY_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff }
#define MASK_SSE_RDRD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SS_L0RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define MASK_SS_LIRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 }
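
The MASK_* arrays mirror the formats: a byte is 0xff (or a 0x0f/0xf0 nibble) exactly where the bit positions carry opcode rather than operand fields. Matching an instruction against a table entry then reduces to a masked byte compare, roughly in the spirit of opcodes/s390-dis.c (which additionally indexes entries by their first opcode byte); a sketch, not the actual disassembler loop:

/* An opcode-table entry matches an instruction image when every byte
   agrees with the entry's opcode under the entry's mask.  */
static int
opcode_matches (const unsigned char *insn,
                const unsigned char *opcode,
                const unsigned char *mask)
{
  int i;

  for (i = 0; i < 6; i++)
    if ((insn[i] & mask[i]) != opcode[i])
      return 0;
  return 1;
}
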
{ "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 },
{ "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 },
{ "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 },
+ { "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 },
{ "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 },
{ "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 },
{ "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR,3, 0 },
+ { "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 },
{ "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 },
{ "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 },
+ { "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 },
{ "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD,3, 0 },
{ "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 },
};
b1 lra RX_RRRD "load real address" g5 esa,zarch
25 lrdr RR_FF "load rounded (ext. to long)" g5 esa,zarch
35 lrer RR_FF "load rounded (long to short)" g5 esa,zarch
+25 ldxr RR_FF "load rounded (ext. to long)" g5 esa,zarch
+35 ledr RR_FF "load rounded (long to short)" g5 esa,zarch
22 ltdr RR_FF "load and test (long)" g5 esa,zarch
32 lter RR_FF "load and test (short)" g5 esa,zarch
12 ltr RR_RR "load and test" g5 esa,zarch
6c md RX_FRRD "multiply (long)" g5 esa,zarch
2c mdr RR_FF "multiply (long)" g5 esa,zarch
7c me RX_FRRD "multiply (short to long)" g5 esa,zarch
+7c mde RX_FRRD "multiply (short to long)" g5 esa,zarch
3c mer RR_FF "multiply (short to long)" g5 esa,zarch
+3c mder RR_FF "multiply short to long hfp" g5 esa,zarch
4c mh RX_RRRD "multiply halfword" g5 esa,zarch
fc mp SS_LLRDRD "multiply decimal" g5 esa,zarch
1c mr RR_RR "multiply" g5 esa,zarch
e30000000091 llgh RXE_RRRD "load logical halfword" z900 zarch
eb000000001c rllg RSE_RRRD "rotate left single logical 64" z900 zarch
eb000000001d rll RSE_RRRD "rotate left single logical 32" z900 esa,zarch
+b369 cxr RRE_FF "compare extended hfp" g5 esa,zarch
+b3b6 cxfr RRE_FR "convert from fixed 32 to extended hfp" g5 esa,zarch
+b3b5 cdfr RRE_FR "convert from fixed 32 to long hfp" g5 esa,zarch
+b3b4 cefr RRE_FR "convert from fixed 32 to short hfp" g5 esa,zarch
+b3ba cfxr RRF_U0RF "convert to fixed extended hfp to 32" g5 esa,zarch
+b3b9 cfdr RRF_U0RF "convert to fixed long hfp to 32" g5 esa,zarch
+b3b8 cfer RRF_U0RF "convert to fixed short hfp to 32" g5 esa,zarch
+b362 ltxr RRE_FF "load and test extended hfp" g5 esa,zarch
+b363 lcxr RRE_FF "load complement extended hfp" g5 esa,zarch
+b367 fixr RRF_U0FF "load fp integer extended hfp" g5 esa,zarch
+b37f fidr RRF_U0FF "load fp integer long hfp" g5 esa,zarch
+b377 fier RRF_U0FF "load fp integer short hfp" g5 esa,zarch
+b325 lxdr RRE_FF "load lengthened long to extended hfp" g5 esa,zarch
+ed0000000025 lxd RXE_FRRD "load lengthened long to extended hfp" g5 esa,zarch
+b326 lxer RRE_FF "load lengthened short to extended hfp" g5 esa,zarch
+ed0000000026 lxe RXE_FRRD "load lengthened short to extended hfp" g5 esa,zarch
+b324 lder RRE_FF "load lengthened short to long hfp" g5 esa,zarch
+ed0000000024 lde RXE_FRRD "load lengthened short to long hfp" g5 esa,zarch
+b361 lnxr RRE_FF "load negative extended hfp" g5 esa,zarch
+b360 lpxr RRE_FF "load positive extended hfp" g5 esa,zarch
+b366 lexr RRE_FF "load rounded extended to short hfp" g5 esa,zarch
+b337 meer RRE_FF "multiply short hfp" g5 esa,zarch
+ed0000000037 mee RXE_FRRD "multiply short hfp" g5 esa,zarch
+b336 sqxr RRE_FF "square root extended hfp" g5 esa,zarch
+ed0000000034 sqe RXE_FRRD "square root short hfp" g5 esa,zarch
+b263 cmpsc RRE_RR "compression call" g5 esa,zarch
+eb00000000c0 tp RSL_R0RD "test decimal" g5 esa,zarch
+b365 lxr RRE_FF "load extended hfp" g5 esa,zarch
+b22e pgin RRE_RR "page in" g5 esa,zarch
+b22f pgout RRE_RR "page out" g5 esa,zarch
+b276 xsch S_00 "cancel subchannel" g5 esa,zarch
+# New long displacement instructions on z990
+e3000000005a ay RXY_RRRD "add with long offset" z990 zarch
+e3000000007a ahy RXY_RRRD "add halfword with long offset" z990 zarch
+e3000000005e aly RXY_RRRD "add logical with long offset" z990 zarch
+eb0000000054 niy SIY_URD "and immediate with long offset" z990 zarch
+e30000000054 ny RXY_RRRD "and with long offset" z990 zarch
+e30000000059 cy RXY_RRRD "compare with long offset" z990 zarch
+eb0000000014 csy RSY_RRRD "compare and swap with long offset" z990 zarch
+eb0000000031 cdsy RSY_RRRD "compare double and swap with long offset" z990 zarch
+e30000000079 chy RXY_RRRD "compare halfword with long offset" z990 zarch
+e30000000055 cly RXY_RRRD "compare logical with long offset" z990 zarch
+eb0000000055 cliy SIY_URD "compare logical immediate with long offset" z990 zarch
+eb0000000021 clmy RSY_RURD "compare logical characters under mask with long offset" z990 zarch
+e30000000006 cvby RXY_RRRD "convert to binary with long offset" z990 zarch
+e30000000026 cvdy RXY_RRRD "convert to decimal with long offset" z990 zarch
+eb0000000057 xiy SIY_URD "exclusive or immediate with long offset" z990 zarch
+e30000000057 xy RXY_RRRD "exclusive or with long offset" z990 zarch
+e30000000073 icy RXY_RRRD "insert character with long offset" z990 zarch
+eb0000000081 icmy RSY_RURD "insert characters with long offset" z990 zarch
+ed0000000065 ldy RXY_FRRD "load (long) with long offset" z990 zarch
+ed0000000064 ley RXY_FRRD "load (short) with long offset" z990 zarch
+e30000000058 ly RXY_RRRD "load with long offset" z990 zarch
+eb000000009a lamy RSY_AARD "load access multiple with long offset" z990 zarch
+e30000000071 lay RXY_RRRD "load address with long offset" z990 zarch
+e30000000076 lb RXY_RRRD "load byte with long offset" z990 zarch
+e30000000077 lgb RXY_RRRD "load byte with long offset 64" z990 zarch
+e30000000078 lhy RXY_RRRD "load halfword with long offset" z990 zarch
+eb0000000098 lmy RSY_RRRD "load multiple with long offset" z990 zarch
+e30000000013 lray RXY_RRRD "load real address with long offset" z990 zarch
+eb0000000052 mviy SIY_URD "move immediate with long offset" z990 zarch
+e30000000051 msy RXY_RRRD "multiply single with long offset" z990 zarch
+eb0000000056 oiy SIY_URD "or immediate with long offset" z990 zarch
+e30000000056 oy RXY_RRRD "or with long offset" z990 zarch
+ed0000000067 stdy RXY_FRRD "store (long) with long offset" z990 zarch
+ed0000000066 stey RXY_FRRD "store (short) with long offset" z990 zarch
+e30000000050 sty RXY_RRRD "store with long offset" z990 zarch
+eb000000009b stamy RSY_AARD "store access multiple with long offset" z990 zarch
+e30000000072 stcy RXY_RRRD "store character with long offset" z990 zarch
+eb000000002d stcmy RSY_RURD "store characters under mask with long offset" z990 zarch
+e30000000070 sthy RXY_RRRD "store halfword with long offset" z990 zarch
+eb0000000090 stmy RSY_RRRD "store multiple with long offset" z990 zarch
+e3000000005b sy RXY_RRRD "subtract with long offset" z990 zarch
+e3000000007b shy RXY_RRRD "subtract halfword with long offset" z990 zarch
+e3000000005f sly RXY_RRRD "subtract logical with long offset" z990 zarch
+eb0000000051 tmy SIY_URD "test under mask with long offset" z990 zarch
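
A note on the encoding these entries rely on: the 20-bit signed displacement of the RXY, RSY and SIY formats is not stored contiguously. The low 12 bits (DL) sit at bit 20, where the classic 12-bit displacement lives, and the high 8 bits (DH) occupy bits 32-39. A minimal encoding sketch, assuming a 6-byte instruction buffer; set_disp20 is a made-up helper:

#include <stdint.h>

/* Store a signed 20-bit displacement into an RXY/RSY/SIY instruction:
   DL (low 12 bits) goes to bits 20-31, DH (high 8 bits) to bits 32-39.  */
static void
set_disp20 (unsigned char *insn, int32_t disp)
{
  uint32_t udisp = (uint32_t) disp & 0xfffff;  /* 20-bit two's complement */

  insn[2] = (insn[2] & 0xf0) | ((udisp >> 8) & 0x0f);  /* DL high nibble */
  insn[3] = udisp & 0xff;                              /* DL low byte */
  insn[4] = (udisp >> 12) & 0xff;                      /* DH */
}

With r1 = 1, x2 = 0, b2 = 2 and disp = -4, ly %r1,-4(%r2) should come out as e3 10 2f fc ff 58, i.e. DL = 0xffc and DH = 0xff.
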
+# 'old' instructions extended to long displacement
+# these instructions are entered into the opcode table twice.
+e30000000003 lrag RXY_RRRD "load real address with long offset 64" z990 zarch
+e30000000004 lg RXY_RRRD "load 64" z990 zarch
+e30000000008 ag RXY_RRRD "add with long offset 64" z990 zarch
+e30000000009 sg RXY_RRRD "subtract with long offset 64" z990 zarch
+e3000000000a alg RXY_RRRD "add logical with long offset 64" z990 zarch
+e3000000000b slg RXY_RRRD "subtract logical with long offset 64" z990 zarch
+e3000000000c msg RXY_RRRD "multiply single with long offset 64" z990 zarch
+e3000000000d dsg RXY_RRRD "divide single 64" z990 zarch
+e3000000000e cvbg RXY_RRRD "convert to binary with long offset 64" z990 zarch
+e3000000000f lrvg RXY_RRRD "load reversed 64" z990 zarch
+e30000000014 lgf RXY_RRRD "load 64<32" z990 zarch
+e30000000015 lgh RXY_RRRD "load halfword 64" z990 zarch
+e30000000016 llgf RXY_RRRD "load logical 64<32" z990 zarch
+e30000000017 llgt RXY_RRRD "load logical thirty one bits" z990 zarch
+e30000000018 agf RXY_RRRD "add with long offset 64<32" z990 zarch
+e30000000019 sgf RXY_RRRD "subtract with long offset 64<32" z990 zarch
+e3000000001a algf RXY_RRRD "add logical with long offset 64<32" z990 zarch
+e3000000001b slgf RXY_RRRD "subtract logical with long offset 64<32" z990 zarch
+e3000000001c msgf RXY_RRRD "multiply single with long offset 64<32" z990 zarch
+e3000000001d dsgf RXY_RRRD "divide single 64<32" z990 zarch
+e3000000001e lrv RXY_RRRD "load reversed 32" z990 zarch
+e3000000001f lrvh RXY_RRRD "load reversed 16" z990 zarch
+e30000000020 cg RXY_RRRD "compare with long offset 64" z990 zarch
+e30000000021 clg RXY_RRRD "compare logical with long offset 64" z990 zarch
+e30000000024 stg RXY_RRRD "store with long offset 64" z990 zarch
+e3000000002e cvdg RXY_RRRD "convert to decimal with long offset 64" z990 zarch
+e3000000002f strvg RXY_RRRD "store reversed 64" z990 zarch
+e30000000030 cgf RXY_RRRD "compare with long offset 64<32" z990 zarch
+e30000000031 clgf RXY_RRRD "compare logical with long offset 64<32" z990 zarch
+e3000000003e strv RXY_RRRD "store reversed 32" z990 zarch
+e3000000003f strvh RXY_RRRD "store reversed 16" z990 zarch
+e30000000046 bctg RXY_RRRD "branch on count 64" z990 zarch
+e30000000080 ng RXY_RRRD "and with long offset 64" z990 zarch
+e30000000081 og RXY_RRRD "or with long offset 64" z990 zarch
+e30000000082 xg RXY_RRRD "exclusive or with long offset 64" z990 zarch
+e30000000086 mlg RXY_RRRD "multiply logical 64" z990 zarch
+e30000000087 dlg RXY_RRRD "divide logical 64" z990 zarch
+e30000000088 alcg RXY_RRRD "add logical with carry 64" z990 zarch
+e30000000089 slbg RXY_RRRD "subtract logical with borrow 64" z990 zarch
+e3000000008e stpq RXY_RRRD "store pair to quadword" z990 zarch
+e3000000008f lpq RXY_RRRD "load pair from quadword" z990 zarch
+e30000000090 llgc RXY_RRRD "load logical character" z990 zarch
+e30000000091 llgh RXY_RRRD "load logical halfword" z990 zarch
+e30000000096 ml RXY_RRRD "multiply logical 32" z990 zarch
+e30000000097 dl RXY_RRRD "divide logical 32" z990 zarch
+e30000000098 alc RXY_RRRD "add logical with carry 32" z990 zarch
+e30000000099 slb RXY_RRRD "subtract logical with borrow 32" z990 zarch
+eb0000000004 lmg RSY_RRRD "load multiple with long offset 64" z990 zarch
+eb000000000a srag RSY_RRRD "shift right single 64" z990 zarch
+eb000000000b slag RSY_RRRD "shift left single 64" z990 zarch
+eb000000000c srlg RSY_RRRD "shift right single logical 64" z990 zarch
+eb000000000d sllg RSY_RRRD "shift left single logical 64" z990 zarch
+eb000000000f tracg RSY_RRRD "trace 64" z990 zarch
+eb000000001c rllg RSY_RRRD "rotate left single logical 64" z990 zarch
+eb000000001d rll RSY_RRRD "rotate left single logical 32" z990 zarch
+eb0000000020 clmh RSY_RURD "compare logical characters under mask high with long offset" z990 zarch
+eb0000000024 stmg RSY_RRRD "store multiple with long offset 64" z990 zarch
+eb0000000025 stctg RSY_RRRD "store control 64" z990 zarch
+eb0000000026 stmh RSY_RRRD "store multiple high" z990 zarch
+eb000000002c stcmh RSY_RURD "store characters under mask high with long offset" z990 zarch
+eb000000002f lctlg RSY_RRRD "load control 64" z990 zarch
+eb0000000030 csg RSY_RRRD "compare and swap with long offset 64" z990 zarch
+eb000000003e cdsg RSY_RRRD "compare double and swap with long offset 64" z990 zarch
+eb0000000044 bxhg RSY_RRRD "branch on index high 64" z990 zarch
+eb0000000045 bxleg RSY_RRRD "branch on index low or equal 64" z990 zarch
+eb0000000080 icmh RSY_RURD "insert characters under mask high with long offset" z990 zarch
+eb000000008e mvclu RSY_RRRD "move long unicode" z990 zarch
+eb000000008f clclu RSY_RRRD "compare logical long unicode with long offset" z990 zarch
+eb0000000096 lmh RSY_RRRD "load multiple high" z990 zarch
+# new z990 instructions
+b98a cspg RRE_RR "compare and swap and purge" z990 zarch
+b98e idte RRF_R0RR "invalidate dat table entry" z990 zarch
+b33e madr RRF_F0FF "multiply and add long hfp" z990 esa,zarch
+ed000000003e mad RXF_FRRDF "multiply and add long hfp" z990 esa,zarch
+b32e maer RRF_F0FF "multiply and add short hfp" z990 esa,zarch
+ed000000002e mae RXF_FRRDF "multiply and add short hfp" z990 esa,zarch
+b33f msdr RRF_F0FF "multiply and subtract long hfp" z990 esa,zarch
+ed000000003f msd RXF_FRRDF "multiply and subtract long hfp" z990 esa,zarch
+b32f mser RRF_F0FF "multiply and subtract short hfp" z990 esa,zarch
+ed000000002f mse RXF_FRRDF "multiply and subtract short hfp" z990 esa,zarch
+b92e km RRE_RR "cipher message" z990 esa,zarch
+b92f kmc RRE_RR "cipher message with chaining" z990 esa,zarch
+b93e kimd RRE_RR "compute intermediate message digest" z990 esa,zarch
+b93f klmd RRE_RR "compute last message digest" z990 esa,zarch
+b91e kmac RRE_RR "compute message authentication code" z990 esa,zarch
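
For completeness, each non-comment line of the .txt table above carries six fields: hex opcode, mnemonic, format name, quoted description, minimum CPU level (g5, g6, z900, z990) and mode list (esa, zarch or esa,zarch); opcodes/s390-mkopc.c converts these lines into the generated opcode table. A rough stand-alone parser under those assumptions, sketch only:

#include <stdio.h>
#include <string.h>

/* Parse one opcode line of the shape
     e30000000058 ly RXY_RRRD "load with long offset" z990 zarch
   into caller-supplied buffers (at least 16 bytes each; descr must be
   large enough for the quoted text).  The real converter is
   opcodes/s390-mkopc.c.  */
static int
parse_opc_line (const char *line, char *opcode, char *mnemonic,
                char *format, char *descr, char *cpu, char *modes)
{
  const char *start, *end;

  if (*line == '#')  /* skip comment lines */
    return -1;
  if (sscanf (line, "%15s %15s %15s", opcode, mnemonic, format) != 3)
    return -1;
  start = strchr (line, '"');
  end = start ? strchr (start + 1, '"') : NULL;
  if (end == NULL)
    return -1;
  memcpy (descr, start + 1, end - start - 1);
  descr[end - start - 1] = '\0';
  return sscanf (end + 1, "%15s %15s", cpu, modes) == 2 ? 0 : -1;
}
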