From: John David Anglin
Date: Wed, 1 Jul 2015 17:42:20 +0000 (+0000)
Subject: linux-atomic.c (__kernel_cmpxchg): Reorder arguments to better match light-weight syscall argument order.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=f9a12f7b8fd56359f1eee124309fc783f83466b5;p=gcc.git

linux-atomic.c (__kernel_cmpxchg): Reorder arguments to better match
light-weight syscall argument order.

        * config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
        better match light-weight syscall argument order.
        (__kernel_cmpxchg2): Likewise.
        Adjust callers.

From-SVN: r225267
---

diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index 72c8c7afbb4..be1fbab2cb6 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,10 @@
+2015-07-01  John David Anglin
+
+        * config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
+        better match light-weight syscall argument order.
+        (__kernel_cmpxchg2): Likewise.
+        Adjust callers.
+
 2015-06-30  H.J. Lu
 
         * config.host: Support i[34567]86-*-elfiamcu target.
diff --git a/libgcc/config/pa/linux-atomic.c b/libgcc/config/pa/linux-atomic.c
index 5d99071fee9..750cb8b13c1 100644
--- a/libgcc/config/pa/linux-atomic.c
+++ b/libgcc/config/pa/linux-atomic.c
@@ -46,18 +46,17 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 /* Kernel helper for compare-and-exchange a 32-bit value.  */
 static inline long
-__kernel_cmpxchg (int oldval, int newval, int *mem)
+__kernel_cmpxchg (int *mem, int oldval, int newval)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret asm("r28");
-  register long lws_errno asm("r21");
   register int lws_old asm("r25") = oldval;
   register int lws_new asm("r24") = newval;
+  register long lws_ret asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (        "ble    0xb0(%%sr2, %%r0)       \n\t"
-                        "ldi    %5, %%r20               \n\t"
-        : "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
-          "=r" (lws_old), "=r" (lws_new)
-        : "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+                        "ldi    %2, %%r20               \n\t"
+        : "=r" (lws_ret), "=r" (lws_errno)
+        : "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
         : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -73,19 +72,20 @@ __kernel_cmpxchg (int oldval, int newval, int *mem)
 }
 
 static inline long
-__kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
+__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
                    int val_size)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret asm("r28");
-  register long lws_errno asm("r21");
   register unsigned long lws_old asm("r25") = (unsigned long) oldval;
   register unsigned long lws_new asm("r24") = (unsigned long) newval;
   register int lws_size asm("r23") = val_size;
+  register long lws_ret asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (        "ble    0xb0(%%sr2, %%r0)       \n\t"
-                        "ldi    %2, %%r20               \n\t"
-        : "=r" (lws_ret), "=r" (lws_errno)
-        : "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
+                        "ldi    %6, %%r20               \n\t"
+        : "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
+          "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
+        : "i" (2)
         : "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -116,7 +116,7 @@ __kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
     do {                                                        \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);            \
       newval = PFX_OP (tmp INF_OP val);                         \
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);  \
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);  \
     } while (failure != 0);                                     \
                                                                 \
     return tmp;                                                 \
@@ -146,7 +146,7 @@ FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
     do {                                                        \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);            \
       newval = PFX_OP (tmp INF_OP val);                         \
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);  \
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);  \
     } while (failure != 0);                                     \
                                                                 \
     return PFX_OP (tmp INF_OP val);                             \
@@ -174,7 +174,7 @@ OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)
                                                                 \
     do {                                                        \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);            \
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
     } while (failure != 0);                                     \
                                                                 \
     return tmp;                                                 \
@@ -195,7 +195,7 @@ FETCH_AND_OP_WORD (nand, ~, &)
                                                                 \
     do {                                                        \
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);            \
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
     } while (failure != 0);                                     \
                                                                 \
     return PFX_OP (tmp INF_OP val);                             \
@@ -225,7 +225,7 @@ typedef unsigned char bool;
       if (__builtin_expect (oldval != actual_oldval, 0))        \
         return actual_oldval;                                   \
                                                                 \
-      fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX); \
+      fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
                                                                 \
       if (__builtin_expect (!fail, 1))                          \
         return actual_oldval;                                   \
@@ -236,7 +236,7 @@ typedef unsigned char bool;
   __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                         TYPE newval)            \
   {                                                             \
-    int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX); \
+    int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
     return (failure != 0);                                      \
   }
 
@@ -255,7 +255,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
       if (__builtin_expect (oldval != actual_oldval, 0))
         return actual_oldval;
 
-      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
 
       if (__builtin_expect (!fail, 1))
         return actual_oldval;
@@ -265,7 +265,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
 
 bool HIDDEN
 __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
 {
-  int failure = __kernel_cmpxchg (oldval, newval, ptr);
+  int failure = __kernel_cmpxchg (ptr, oldval, newval);
   return (failure == 0);
 }
@@ -278,7 +278,7 @@ TYPE HIDDEN \
                                                                 \
     do {                                                        \
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);         \
-      failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX);  \
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);  \
     } while (failure != 0);                                     \
                                                                 \
     return oldval;                                              \
@@ -294,7 +294,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
 
   do {
     oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
-    failure = __kernel_cmpxchg (oldval, val, ptr);
+    failure = __kernel_cmpxchg (ptr, oldval, val);
   } while (failure != 0);
 
   return oldval;
@@ -308,7 +308,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
                                                                 \
     do {                                                        \
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);         \
-      failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX); \
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX); \
     } while (failure != 0);                                     \
   }
 
@@ -321,7 +321,7 @@ __sync_lock_release_4 (int *ptr)
   int failure, oldval;
 
   do {
-    oldval = *ptr;
-    failure = __kernel_cmpxchg (oldval, 0, ptr);
+    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+    failure = __kernel_cmpxchg (ptr, oldval, 0);
   } while (failure != 0);
 }
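
Why this order: the 32-bit PA-RISC calling convention passes the first four
arguments in %r26, %r25, %r24 and %r23, and those are exactly the registers
the kernel's light-weight syscall (LWS) helper at 0xb0(%sr2, %r0) reads:
%r26 = mem, %r25 = oldval, %r24 = newval, plus %r23 = val_size for the sized
variant, with the operation index in %r20 (LWS_CAS for the 32-bit CAS, 2 for
cmpxchg2), the result returned in %r28 and the error code in %r21.  Putting
the memory operand first makes the C prototypes mirror the syscall
convention, so argument values are normally already in the right registers
at the call site.

As a rough illustration (not part of the patch; the function name is made
up), a 32-bit fetch-and-add built on the reordered helper would follow the
same retry loop as the FETCH_AND_OP_WORD macro above:

/* Sketch only, assuming the definitions in linux-atomic.c are in scope.  */
static int
sketch_fetch_and_add_4 (int *ptr, int val)
{
  long failure;
  int tmp;

  do
    {
      /* Re-read the current value and try to install tmp + val;
         __kernel_cmpxchg returns nonzero if *ptr changed between the
         load and the LWS compare-and-swap, in which case we retry.  */
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
      failure = __kernel_cmpxchg (ptr, tmp, tmp + val);
    }
  while (failure != 0);

  return tmp;  /* Value before the add, like __sync_fetch_and_add.  */
}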