0+ <.text>:
0: db 12 1e ef 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 8: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 8: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=w2
10: db 12 1e ef 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 18: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 18: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=w2
20: db 12 1e ef 00 00 00 50 lock \*\(u64\*\)\(r1\+0x1eef\)\&=r2
- 28: c3 12 1e ef 00 00 00 50 lock \*\(u32\*\)\(r1\+0x1eef\)\&=r2
+ 28: c3 12 1e ef 00 00 00 50 lock \*\(u32\*\)\(r1\+0x1eef\)\&=w2
30: db 12 1e ef 00 00 00 40 lock \*\(u64\*\)\(r1\+0x1eef\)\|=r2
- 38: c3 12 1e ef 00 00 00 40 lock \*\(u32\*\)\(r1\+0x1eef\)\|=r2
+ 38: c3 12 1e ef 00 00 00 40 lock \*\(u32\*\)\(r1\+0x1eef\)\|=w2
40: db 12 1e ef 00 00 00 a0 lock \*\(u64\*\)\(r1\+0x1eef\)\^=r2
- 48: c3 12 1e ef 00 00 00 a0 lock \*\(u32\*\)\(r1\+0x1eef\)\^=r2
+ 48: c3 12 1e ef 00 00 00 a0 lock \*\(u32\*\)\(r1\+0x1eef\)\^=w2
50: db 12 1e ef 00 00 00 01 r2=atomic_fetch_add\(\(u64\*\)\(r1\+0x1eef\),r2\)
58: c3 12 1e ef 00 00 00 01 w2=atomic_fetch_add\(\(u32\*\)\(r1\+0x1eef\),w2\)
60: db 12 1e ef 00 00 00 51 r2=atomic_fetch_and\(\(u64\*\)\(r1\+0x1eef\),r2\)
0+ <.text>:
0: db 21 ef 1e 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 8: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 8: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=w2
10: db 21 ef 1e 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 18: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 18: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=w2
20: db 21 ef 1e 50 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\&=r2
- 28: c3 21 ef 1e 50 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\&=r2
+ 28: c3 21 ef 1e 50 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\&=w2
30: db 21 ef 1e 40 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\|=r2
- 38: c3 21 ef 1e 40 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\|=r2
+ 38: c3 21 ef 1e 40 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\|=w2
40: db 21 ef 1e a0 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\^=r2
- 48: c3 21 ef 1e a0 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\^=r2
+ 48: c3 21 ef 1e a0 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\^=w2
50: db 21 ef 1e 01 00 00 00 r2=atomic_fetch_add\(\(u64\*\)\(r1\+0x1eef\),r2\)
58: c3 21 ef 1e 01 00 00 00 w2=atomic_fetch_add\(\(u32\*\)\(r1\+0x1eef\),w2\)
60: db 21 ef 1e 51 00 00 00 r2=atomic_fetch_and\(\(u64\*\)\(r1\+0x1eef\),r2\)
# Test for eBPF atomic pseudo-C instructions.
.text
lock *(u64 *)(r1 + 0x1eef) += r2
- lock *(u32 *)(r1 + 0x1eef) += r2
+ lock *(u32 *)(r1 + 0x1eef) += w2
lock *(u64*)(r1+0x1eef)+=r2
- lock *(u32*)(r1+0x1eef)+=r2
+ lock *(u32*)(r1+0x1eef)+=w2
lock *(u64*)(r1+0x1eef)&=r2
- lock *(u32*)(r1+0x1eef)&=r2
+ lock *(u32*)(r1+0x1eef)&=w2
lock *(u64*)(r1+0x1eef)|=r2
- lock *(u32*)(r1+0x1eef)|=r2
+ lock *(u32*)(r1+0x1eef)|=w2
lock *(u64*)(r1+0x1eef)^=r2
- lock *(u32*)(r1+0x1eef)^=r2
+ lock *(u32*)(r1+0x1eef)^=w2
r2 = atomic_fetch_add((u64*)(r1+0x1eef),r2)
w2 = atomic_fetch_add((u32*)(r1+0x1eef),w2)
r2 = atomic_fetch_and((u64*)(r1+0x1eef),r2)
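
For reference, the raw bytes in the dumps above follow the fixed 64-bit eBPF instruction layout: an 8-bit opcode (instruction class | operand size | mode), a register byte carrying the destination and source register numbers in two nibbles (swapped between the big- and little-endian dumps), a 16-bit offset, and a 32-bit immediate that selects the atomic operation (with the fetch flag or-ed in). The sketch below hand-packs two of the instructions from the little-endian dump under those assumptions; the macro and function names are local to this example and are not binutils identifiers.

/* Illustrative sketch, not part of the patch: pack two eBPF atomic
   instructions for a little-endian target and print their bytes.  */

#include <stdint.h>
#include <stdio.h>

/* Opcode = class | size | mode.  */
#define CLASS_STX   0x03
#define SIZE_W      0x00   /* 32-bit operand.  */
#define SIZE_DW     0x18   /* 64-bit operand.  */
#define MODE_ATOMIC 0xc0

/* Atomic operation selector, stored in the immediate field.  */
#define OP_ADD   0x00
#define OP_OR    0x40
#define OP_AND   0x50
#define OP_XOR   0xa0
#define OP_FETCH 0x01      /* Or-ed in to return the old value.  */

static void
emit_le (uint8_t opcode, uint8_t dst, uint8_t src,
         int16_t offset, int32_t imm)
{
  uint8_t insn[8];
  int i;

  insn[0] = opcode;
  insn[1] = (uint8_t) ((src << 4) | dst);        /* Regs byte (LE layout).  */
  insn[2] = (uint8_t) (offset & 0xff);           /* 16-bit offset, LE.  */
  insn[3] = (uint8_t) ((offset >> 8) & 0xff);
  insn[4] = (uint8_t) (imm & 0xff);              /* 32-bit immediate, LE.  */
  insn[5] = (uint8_t) ((imm >> 8) & 0xff);
  insn[6] = (uint8_t) ((imm >> 16) & 0xff);
  insn[7] = (uint8_t) ((imm >> 24) & 0xff);

  for (i = 0; i < 8; i++)
    printf ("%02x ", insn[i]);
  putchar ('\n');
}

int
main (void)
{
  /* lock *(u32 *)(r1 + 0x1eef) &= w2
     -> c3 21 ef 1e 50 00 00 00 in the little-endian dump.  */
  emit_le (CLASS_STX | SIZE_W | MODE_ATOMIC, 1, 2, 0x1eef, OP_AND);

  /* r2 = atomic_fetch_add ((u64 *)(r1 + 0x1eef), r2)
     -> db 21 ef 1e 01 00 00 00.  */
  emit_le (CLASS_STX | SIZE_DW | MODE_ATOMIC, 1, 2, 0x1eef,
           OP_ADD | OP_FETCH);

  return 0;
}

Compiled and run, this prints "c3 21 ef 1e 50 00 00 00" and "db 21 ef 1e 01 00 00 00", matching the entries at offsets 0x28 and 0x50 in the little-endian dump.
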
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AFXOR},
/* Atomic instructions (32-bit.) */
- {BPF_INSN_AADD32, "aadd32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) += %sr",
+ {BPF_INSN_AADD32, "aadd32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) += %sw",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AADD},
- {BPF_INSN_AOR32, "aor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) |= %sr",
+ {BPF_INSN_AOR32, "aor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) |= %sw",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AOR},
- {BPF_INSN_AAND32, "aand32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) &= %sr",
+ {BPF_INSN_AAND32, "aand32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) &= %sw",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AAND},
- {BPF_INSN_AXOR32, "axor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) ^= %sr",
+ {BPF_INSN_AXOR32, "axor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) ^= %sw",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AXOR},
/* Atomic instructions with fetching (32-bit.) */