/* Kernel helper to compare and exchange a 32-bit value.  */
static inline long
-__kernel_cmpxchg (int oldval, int newval, int *mem)
+__kernel_cmpxchg (int *mem, int oldval, int newval)
{
register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
- register long lws_ret asm("r28");
- register long lws_errno asm("r21");
register int lws_old asm("r25") = oldval;
register int lws_new asm("r24") = newval;
+ register long lws_ret asm("r28");
+ register long lws_errno asm("r21");
asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
- "ldi %5, %%r20 \n\t"
- : "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
- "=r" (lws_old), "=r" (lws_new)
- : "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+ "ldi %2, %%r20 \n\t"
+ : "=r" (lws_ret), "=r" (lws_errno)
+ : "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
);
if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
}
static inline long
-__kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
+__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
int val_size)
{
register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
- register long lws_ret asm("r28");
- register long lws_errno asm("r21");
register unsigned long lws_old asm("r25") = (unsigned long) oldval;
register unsigned long lws_new asm("r24") = (unsigned long) newval;
register int lws_size asm("r23") = val_size;
+ register long lws_ret asm("r28");
+ register long lws_errno asm("r21");
asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
- "ldi %2, %%r20 \n\t"
- : "=r" (lws_ret), "=r" (lws_errno)
- : "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
+ "ldi %6, %%r20 \n\t"
+ : "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
+ "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
+ : "i" (2)
: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
);
if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
do { \
tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
newval = PFX_OP (tmp INF_OP val); \
- failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX); \
+ failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
} while (failure != 0); \
\
return tmp; \
do { \
tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
newval = PFX_OP (tmp INF_OP val); \
- failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX); \
+ failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
} while (failure != 0); \
\
return PFX_OP (tmp INF_OP val); \
\
do { \
tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
- failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
} while (failure != 0); \
\
return tmp; \
\
do { \
tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
- failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
} while (failure != 0); \
\
return PFX_OP (tmp INF_OP val); \
if (__builtin_expect (oldval != actual_oldval, 0)) \
return actual_oldval; \
\
- fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX); \
+ fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
\
if (__builtin_expect (!fail, 1)) \
return actual_oldval; \
__sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
TYPE newval) \
{ \
- int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX); \
+ int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
return (failure != 0); \
}
if (__builtin_expect (oldval != actual_oldval, 0))
return actual_oldval;
- fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+ fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
if (__builtin_expect (!fail, 1))
return actual_oldval;
bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
- int failure = __kernel_cmpxchg (oldval, newval, ptr);
+ int failure = __kernel_cmpxchg (ptr, oldval, newval);
return (failure == 0);
}
\
do { \
oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
- failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX); \
+ failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
} while (failure != 0); \
\
return oldval; \
do {
oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
- failure = __kernel_cmpxchg (oldval, val, ptr);
+ failure = __kernel_cmpxchg (ptr, oldval, val);
} while (failure != 0);
return oldval;
\
do { \
oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
- failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX); \
+ failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX); \
} while (failure != 0); \
}
int failure, oldval;
do {
- oldval = *ptr;
- failure = __kernel_cmpxchg (oldval, 0, ptr);
+ oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+ failure = __kernel_cmpxchg (ptr, oldval, 0);
} while (failure != 0);
}