linux-atomic.c (__kernel_cmpxchg): Reorder arguments to better match light-weight syscall argument order.
author: John David Anglin <danglin@gcc.gnu.org>
Wed, 1 Jul 2015 17:42:20 +0000 (17:42 +0000)
committer: John David Anglin <danglin@gcc.gnu.org>
Wed, 1 Jul 2015 17:42:20 +0000 (17:42 +0000)
* config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
better match light-weight syscall argument order.
(__kernel_cmpxchg2): Likewise.
Adjust callers.

From-SVN: r225267

libgcc/ChangeLog
libgcc/config/pa/linux-atomic.c

index 72c8c7afbb473ba67f213f793688f62bfc6ae7e3..be1fbab2cb62b6e28580eba799822f4d494530b3 100644 (file)
@@ -1,3 +1,10 @@
+2015-07-01  John David Anglin  <danglin@gcc.gnu.org>
+
+       * config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
+       better match light-weight syscall argument order.
+       (__kernel_cmpxchg2): Likewise.
+       Adjust callers.
+
 2015-06-30  H.J. Lu  <hongjiu.lu@intel.com>
 
        * config.host: Support i[34567]86-*-elfiamcu target.
index 5d99071fee93e576ebf6a29bc0c2e0a0a0041935..750cb8b13c122291522b356777948dd90498e317 100644 (file)
@@ -46,18 +46,17 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 
 /* Kernel helper for compare-and-exchange a 32-bit value.  */
 static inline long
-__kernel_cmpxchg (int oldval, int newval, int *mem)
+__kernel_cmpxchg (int *mem, int oldval, int newval)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret   asm("r28");
-  register long lws_errno asm("r21");
   register int lws_old asm("r25") = oldval;
   register int lws_new asm("r24") = newval;
+  register long lws_ret   asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (       "ble    0xb0(%%sr2, %%r0)       \n\t"
-                       "ldi    %5, %%r20               \n\t"
-       : "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
-         "=r" (lws_old), "=r" (lws_new)
-       : "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+                       "ldi    %2, %%r20               \n\t"
+       : "=r" (lws_ret), "=r" (lws_errno)
+       : "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
        : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -73,19 +72,20 @@ __kernel_cmpxchg (int oldval, int newval, int *mem)
 }
 
 static inline long
-__kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
+__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
                   int val_size)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret   asm("r28");
-  register long lws_errno asm("r21");
   register unsigned long lws_old asm("r25") = (unsigned long) oldval;
   register unsigned long lws_new asm("r24") = (unsigned long) newval;
   register int lws_size asm("r23") = val_size;
+  register long lws_ret   asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (       "ble    0xb0(%%sr2, %%r0)       \n\t"
-                       "ldi    %2, %%r20               \n\t"
-       : "=r" (lws_ret), "=r" (lws_errno)
-       : "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
+                       "ldi    %6, %%r20               \n\t"
+       : "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
+         "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
+       : "i" (2)
        : "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -116,7 +116,7 @@ __kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
     do {                                                               \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                   \
       newval = PFX_OP (tmp INF_OP val);                                        \
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);         \
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);         \
     } while (failure != 0);                                            \
                                                                        \
     return tmp;                                                                \
@@ -146,7 +146,7 @@ FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
     do {                                                               \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                   \
       newval = PFX_OP (tmp INF_OP val);                                        \
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);         \
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);         \
     } while (failure != 0);                                            \
                                                                        \
     return PFX_OP (tmp INF_OP val);                                    \
@@ -174,7 +174,7 @@ OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)
                                                                        \
     do {                                                               \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                   \
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);  \
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));  \
     } while (failure != 0);                                            \
                                                                        \
     return tmp;                                                                \
@@ -195,7 +195,7 @@ FETCH_AND_OP_WORD (nand, ~, &)
                                                                        \
     do {                                                               \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                   \
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);  \
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));  \
     } while (failure != 0);                                            \
                                                                        \
     return PFX_OP (tmp INF_OP val);                                    \
@@ -225,7 +225,7 @@ typedef unsigned char bool;
        if (__builtin_expect (oldval != actual_oldval, 0))              \
          return actual_oldval;                                         \
                                                                        \
-       fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX); \
+       fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
                                                                        \
        if (__builtin_expect (!fail, 1))                                \
          return actual_oldval;                                         \
@@ -236,7 +236,7 @@ typedef unsigned char bool;
   __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,                \
                                        TYPE newval)                    \
   {                                                                    \
-    int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX);    \
+    int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);    \
     return (failure != 0);                                             \
   }
 
@@ -255,7 +255,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
       if (__builtin_expect (oldval != actual_oldval, 0))
        return actual_oldval;
 
-      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
   
       if (__builtin_expect (!fail, 1))
        return actual_oldval;
@@ -265,7 +265,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
 bool HIDDEN
 __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
 {
-  int failure = __kernel_cmpxchg (oldval, newval, ptr);
+  int failure = __kernel_cmpxchg (ptr, oldval, newval);
   return (failure == 0);
 }
 
@@ -278,7 +278,7 @@ TYPE HIDDEN                                                         \
                                                                        \
     do {                                                               \
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                        \
-      failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX);         \
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);         \
     } while (failure != 0);                                            \
                                                                        \
     return oldval;                                                     \
@@ -294,7 +294,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
 
   do {
     oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
-    failure = __kernel_cmpxchg (oldval, val, ptr);
+    failure = __kernel_cmpxchg (ptr, oldval, val);
   } while (failure != 0);
 
   return oldval;
@@ -308,7 +308,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
                                                                \
     do {                                                       \
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                \
-      failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX);        \
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX);        \
     } while (failure != 0);                                    \
   }
 
@@ -321,7 +321,7 @@ __sync_lock_release_4 (int *ptr)
   int failure, oldval;
 
   do {
-    oldval = *ptr;
-    failure = __kernel_cmpxchg (oldval, 0, ptr);
+    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+    failure = __kernel_cmpxchg (ptr, oldval, 0);
   } while (failure != 0);
 }