PowerPC update comments for the MMA instruction name changes.
authorCarl Love <cel@us.ibm.com>
Fri, 4 Nov 2022 16:14:01 +0000 (12:14 -0400)
committerCarl Love <cel@us.ibm.com>
Fri, 4 Nov 2022 16:14:01 +0000 (12:14 -0400)
The mnemonics for the pmxvf16ger*, pmxvf32ger*, pmxvf64ger*, pmxvi4ger8*,
pmxvi8ger4*, and pmxvi16ger2* instructions were officially changed to
pmdmxvf16ger*, pmdmxvf32ger*, pmdmxvf64ger*, pmdmxvi4ger8*, pmdmxvi8ger4*,
pmdmxvi16ger2* respectively.  The old mnemonics are still supported by the
assembler as extended mnemonics.  The disassembler generates the new
mnemonics.  The name changes occurred in commit:

  commit bb98553cad4e017f1851153fa5de91f2cee98fb2
  Author: Peter Bergner <bergner@linux.ibm.com>
  Date:   Sat Oct 8 16:19:51 2022 -0500

    PowerPC: Add support for RFC02658 - MMA+ Outer-Product Instructions

    gas/
            * config/tc-ppc.c (md_assemble): Only check for prefix opcodes.
            * testsuite/gas/ppc/rfc02658.s: New test.
            * testsuite/gas/ppc/rfc02658.d: Likewise.
            * testsuite/gas/ppc/ppc.exp: Run it.

    opcodes/
            * ppc-opc.c (XMSK8, P_GERX4_MASK, P_GERX2_MASK, XX3GERX_MASK): New.
            (powerpc_opcodes): Add dmxvi8gerx4pp, dmxvi8gerx4, dmxvf16gerx2pp,
            dmxvf16gerx2, dmxvbf16gerx2pp, dmxvf16gerx2np, dmxvbf16gerx2,
            dmxvi8gerx4spp, dmxvbf16gerx2np, dmxvf16gerx2pn, dmxvbf16gerx2pn,
            dmxvf16gerx2nn, dmxvbf16gerx2nn, pmdmxvi8gerx4pp, pmdmxvi8gerx4,
            pmdmxvf16gerx2pp, pmdmxvf16gerx2, pmdmxvbf16gerx2pp, pmdmxvf16gerx2np,
            pmdmxvbf16gerx2, pmdmxvi8gerx4spp, pmdmxvbf16gerx2np, pmdmxvf16gerx2pn,
            pmdmxvbf16gerx2pn, pmdmxvf16gerx2nn, pmdmxvbf16gerx2nn.

This patch updates the comments in the various gdb files to reflect the
name changes.  There are no functional changes made by this patch.

The older instruction names are still used in the test
gdb.reverse/ppc_record_test_isa_3_1.exp for backwards compatibility.

Patch has been tested on Power 10 with no regressions.

gdb/rs6000-tdep.c
gdb/testsuite/gdb.reverse/ppc_record_test_isa_3_1.c
gdb/testsuite/gdb.reverse/ppc_record_test_isa_3_1.exp

index aac424a65be8fe23e3aebe280bf7d0eea530253b..866d43ded7a37f7a13d8c1a190b1e30a5b3fcdf5 100644 (file)
@@ -5535,6 +5535,10 @@ ppc_process_record_op59 (struct gdbarch *gdbarch, struct regcache *regcache,
   int ext = PPC_EXTOP (insn);
   int at = PPC_FIELD (insn, 6, 3);
 
+  /* Note the mnemonics for the pmxvf64ger* instructions were officially
+     changed to pmdmxvf64ger*.  The old mnemonics are still supported as
+     extended mnemonics.  */
+
   switch (ext & 0x1f)
     {
     case 18:           /* Floating Divide */
@@ -5603,7 +5607,8 @@ ppc_process_record_op59 (struct gdbarch *gdbarch, struct regcache *regcache,
     case 218:  /* VSX Vector 32-bit Floating-Point GER Negative multiply,
                   Negative accumulate, xvf32gernn */
 
-    case 59:   /* VSX Vector 64-bit Floating-Point GER, pmxvf64ger */
+    case 59:   /* VSX Vector 64-bit Floating-Point GER, pmdmxvf64ger
+                  (pmxvf64ger)  */
     case 58:   /* VSX Vector 64-bit Floating-Point GER Positive multiply,
                   Positive accumulate, xvf64gerpp */
     case 186:  /* VSX Vector 64-bit Floating-Point GER Positive multiply,
@@ -5611,7 +5616,7 @@ ppc_process_record_op59 (struct gdbarch *gdbarch, struct regcache *regcache,
     case 122:  /* VSX Vector 64-bit Floating-Point GER Negative multiply,
                   Positive accumulate, xvf64gernp */
     case 250:  /* VSX Vector 64-bit Floating-Point GER Negative multiply,
-                  Negative accumulate, pmxvf64gernn */
+                  Negative accumulate, pmdmxvf64gernn (pmxvf64gernn)  */
 
     case 51:   /* VSX Vector bfloat16 GER, xvbf16ger2 */
     case 50:   /* VSX Vector bfloat16 GER Positive multiply,
@@ -6486,98 +6491,106 @@ ppc_process_record_prefix_op59_XX3 (struct gdbarch *gdbarch,
   int at = PPC_FIELD (insn_suffix, 6, 3);
   ppc_gdbarch_tdep *tdep = gdbarch_tdep<ppc_gdbarch_tdep> (gdbarch);
 
+  /* Note, the mnemonics for the pmxvf16ger*, pmxvf32ger*, pmxvf64ger*,
+     pmxvi4ger8*, pmxvi8ger4*, and pmxvi16ger2* instructions were officially
+     changed to pmdmxvf16ger*, pmdmxvf32ger*, pmdmxvf64ger*, pmdmxvi4ger8*,
+     pmdmxvi8ger4*, pmdmxvi16ger2* respectively.  The old mnemonics are still
+     supported by the assembler as extended mnemonics.  The disassembler
+     generates the new mnemonics.  */
   if (type == 3)
     {
       if (ST4 == 9)
        switch (opcode)
          {
          case 35:      /* Prefixed Masked VSX Vector 4-bit Signed Integer GER
-                          MMIRR, pmxvi4ger8 */
+                          MMIRR, pmdmxvi4ger8 (pmxvi4ger8) */
          case 34:      /* Prefixed Masked VSX Vector 4-bit Signed Integer GER
-                          MMIRR, pmxvi4ger8pp */
+                          MMIRR, pmdmxvi4ger8pp (pmxvi4ger8pp) */
 
          case 99:      /* Prefixed Masked VSX Vector 8-bit Signed/Unsigned
                           Integer GER with Saturate Positive multiply,
                           Positive accumulate, xvi8ger4spp */
 
          case 3:       /* Prefixed Masked VSX Vector 8-bit Signed/Unsigned
-                          Integer GER MMIRR, pmxvi8ger4 */
+                          Integer GER MMIRR, pmdmxvi8ger4 (pmxvi8ger4)  */
          case 2:       /* Prefixed Masked VSX Vector 8-bit Signed/Unsigned
                           Integer GER Positive multiply, Positive accumulate
-                          MMIRR, pmxvi8ger4pp */
+                          MMIRR, pmdmxvi8ger4pp (pmxvi8ger4pp)  */
 
          case 75:      /* Prefixed Masked VSX Vector 16-bit Signed Integer
-                          GER MMIRR, pmxvi16ger2 */
+                          GER MMIRR, pmdmxvi16ger2 (pmxvi16ger2)  */
          case 107:     /* Prefixed Masked VSX Vector 16-bit Signed Integer
                           GER  Positive multiply, Positive accumulate,
-                          pmxvi16ger2pp */
+                          pmdmxvi16ger2pp (pmxvi16ger2pp)  */
 
          case 43:      /* Prefixed Masked VSX Vector 16-bit Signed Integer
-                          GER with Saturation MMIRR, pmxvi16ger2s */
+                          GER with Saturation MMIRR, pmdmxvi16ger2s
+                          (pmxvi16ger2s)  */
          case 42:      /* Prefixed Masked VSX Vector 16-bit Signed Integer
                           GER with Saturation Positive multiply, Positive
-                          accumulate MMIRR, pmxvi16ger2spp */
+                          accumulate MMIRR, pmdmxvi16ger2spp
+                          (pmxvi16ger2spp)  */
            ppc_record_ACC_fpscr (regcache, tdep, at, false);
            return 0;
 
          case 19:      /* Prefixed Masked VSX Vector 16-bit Floating-Point
-                          GER MMIRR, pmxvf16ger2 */
+                          GER MMIRR, pmdmxvf16ger2 (pmxvf16ger2)  */
          case 18:      /* Prefixed Masked VSX Vector 16-bit Floating-Point
                           GER Positive multiply, Positive accumulate MMIRR,
-                          pmxvf16ger2pp */
+                          pmdmxvf16ger2pp (pmxvf16ger2pp)  */
          case 146:     /* Prefixed Masked VSX Vector 16-bit Floating-Point
                           GER Positive multiply, Negative accumulate MMIRR,
-                          pmxvf16ger2pn */
+                          pmdmxvf16ger2pn (pmxvf16ger2pn)  */
          case 82:      /* Prefixed Masked VSX Vector 16-bit Floating-Point
                           GER Negative multiply, Positive accumulate MMIRR,
-                          pmxvf16ger2np */
+                          pmdmxvf16ger2np (pmxvf16ger2np)  */
          case 210:     /* Prefixed Masked VSX Vector 16-bit Floating-Point
                           GER Negative multiply, Negative accumulate MMIRR,
-                          pmxvf16ger2nn */
+                          pmdmxvf16ger2nn (pmxvf16ger2nn)  */
 
          case 27:      /* Prefixed Masked VSX Vector 32-bit Floating-Point
-                          GER MMIRR, pmxvf32ger */
+                          GER MMIRR, pmdmxvf32ger (pmxvf32ger)  */
          case 26:      /* Prefixed Masked VSX Vector 32-bit Floating-Point
                           GER Positive multiply, Positive accumulate MMIRR,
-                          pmxvf32gerpp */
+                          pmdmxvf32gerpp (pmxvf32gerpp)  */
          case 154:     /* Prefixed Masked VSX Vector 32-bit Floating-Point
                           GER Positive multiply, Negative accumulate MMIRR,
-                          pmxvf32gerpn */
+                          pmdmxvf32gerpn (pmxvf32gerpn)  */
          case 90:      /* Prefixed Masked VSX Vector 32-bit Floating-Point
                           GER Negative multiply, Positive accumulate MMIRR,
-                          pmxvf32gernp */
+                          pmdmxvf32gernp (pmxvf32gernp)  */
          case 218:     /* Prefixed Masked VSX Vector 32-bit Floating-Point
                           GER Negative multiply, Negative accumulate MMIRR,
-                          pmxvf32gernn */
+                          pmdmxvf32gernn (pmxvf32gernn)  */
 
          case 59:      /* Prefixed Masked VSX Vector 64-bit Floating-Point
-                          GER MMIRR, pmxvf64ger */
+                          GER MMIRR, pmdmxvf64ger (pmxvf64ger)  */
          case 58:      /* Floating-Point GER Positive multiply, Positive
-                          accumulate MMIRR, pmxvf64gerpp */
+                          accumulate MMIRR, pmdmxvf64gerpp (pmxvf64gerpp)  */
          case 186:     /* Prefixed Masked VSX Vector 64-bit Floating-Point
                           GER Positive multiply, Negative accumulate MMIRR,
-                          pmxvf64gerpn */
+                          pmdmxvf64gerpn (pmxvf64gerpn)  */
          case 122:     /* Prefixed Masked VSX Vector 64-bit Floating-Point
                           GER Negative multiply, Positive accumulate MMIRR,
-                          pmxvf64gernp */
+                          pmdmxvf64gernp (pmxvf64gernp)  */
          case 250:     /* Prefixed Masked VSX Vector 64-bit Floating-Point
                           GER Negative multiply, Negative accumulate MMIRR,
-                          pmxvf64gernn */
+                          pmdmxvf64gernn (pmxvf64gernn)  */
 
          case 51:      /* Prefixed Masked VSX Vector bfloat16 GER MMIRR,
-                          pmxvbf16ger2 */
+                          pmdmxvbf16ger2 (pmxvbf16ger2)  */
          case 50:      /* Prefixed Masked VSX Vector bfloat16 GER Positive
                           multiply, Positive accumulate MMIRR,
-                          pmxvbf16ger2pp */
+                          pmdmxvbf16ger2pp (pmxvbf16ger2pp)  */
          case 178:     /* Prefixed Masked VSX Vector bfloat16 GER Positive
                           multiply, Negative accumulate MMIRR,
-                          pmxvbf16ger2pn */
+                          pmdmxvbf16ger2pn (pmxvbf16ger2pn)  */
          case 114:     /* Prefixed Masked VSX Vector bfloat16 GER Negative
                           multiply, Positive accumulate MMIRR,
-                          pmxvbf16ger2np */
+                          pmdmxvbf16ger2np (pmxvbf16ger2np)  */
          case 242:     /* Prefixed Masked VSX Vector bfloat16 GER Negative
                           multiply, Negative accumulate MMIRR,
-                          pmxvbf16ger2nn */
+                          pmdmxvbf16ger2nn (pmxvbf16ger2nn)  */
            ppc_record_ACC_fpscr (regcache, tdep, at, true);
            return 0;
          }
index c0d65d944afb81b50807bf0ae84b555774c02b2f..e44645e0f581159a423087b8565333e7fdee58ef 100644 (file)
@@ -22,6 +22,13 @@ static unsigned long ra, rb, rs;
 int
 main ()
 {
+
+  /* This test is used to verify the recording of the MMA instructions.  The
+     names of the MMA instructions pmxvf16ger*, pmxvf32ger*, pmxvf64ger*,
+     pmxvi4ger8*, pmxvi8ger4*, and pmxvi16ger2* were officially changed to
+     pmdmxvf16ger*, pmdmxvf32ger*, pmdmxvf64ger*, pmdmxvi4ger8*,
+     pmdmxvi8ger4*, pmdmxvi16ger2* respectively.  The old mnemonics are used
+     in this test for backward compatibility.  */
   ra = 0xABCDEF012;
   rb = 0;
   rs = 0x012345678;
@@ -87,6 +94,7 @@ main ()
                        "wa" (vec_xb) );
   __asm__ __volatile__ ("xvf16ger2pn 5, %x0, %x1" :: "wa" (vec_xa),\
                        "wa" (vec_xb) );
+  /* Use the older instruction name for backward compatibility.  */
   __asm__ __volatile__ ("pmxvi8ger4spp  6, %x0, %x1, 11, 13, 5"
                                 :: "wa" (vec_xa), "wa" (vec_xb) );
   __asm__ __volatile__ ("pmxvf32gerpp  7, %x0, %x1, 11, 13"
index 8cecb0676677cc9f9dabd4300433cf0be707a0b5..79f04f65b6441ec424c40a2d213302c526cd22f4 100644 (file)
@@ -124,6 +124,11 @@ gdb_test_no_output "record" "start recording test2"
 ##       pmxvi8ger4   - ACC[6], vs[21] to vs[27]
 ##       pmxvf32gerpp - ACC[7], vs[28] to vs[31] and fpscr
 
+## Note the names for pmxvi8ger4 and pmxvf32gerpp have been officially
+## changed to pmdmxvi8ger4 and pmdmxvf32gerpp respectively.  The older
+## names are still supported by the assembler as extended mnemonics.  The
+## older names are used in this test for backward compatibility.
+
 set stop3 [gdb_get_line_number "stop 3"]
 set stop4 [gdb_get_line_number "stop 4"]