@table @code
@item aadd [rd + offset16], rs
-@itemx *(u64 *)(rd + offset16) = rs
+@itemx lock *(u64 *)(rd + offset16) += rs
Atomic add instruction.
@item aor [rd + offset16], rs
-@itemx *(u64 *) (rd + offset16) |= rs
+@itemx lock *(u64 *) (rd + offset16) |= rs
Atomic or instruction.
@item aand [rd + offset16], rs
-@itemx *(u64 *) (rd + offset16) &= rs
+@itemx lock *(u64 *) (rd + offset16) &= rs
Atomic and instruction.
@item axor [rd + offset16], rs
-@itemx *(u64 *) (rd + offset16) ^= rs
-Atomic xor instruction
-@item xaddw [%d+offset16],%s
-Exchange-and-add a 32-bit value at the specified location.
+@itemx lock *(u64 *) (rd + offset16) ^= rs
+Atomic xor instruction.
@end table
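+
+@noindent
+As a brief illustration, the first line below is an @code{aadd}
+instruction in the normal assembler dialect, and the second line is the
+same instruction written in the pseudo-C dialect; the registers and the
+offset are arbitrary example operands:
+
+@example
+# r1, r2 and the offset 8 are arbitrary example operands.
+aadd [%r1+8], %r2
+lock *(u64 *)(r1 + 8) += r2
+@end example
+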
@noindent
The following variants fetch the value stored at the memory location
before the atomic operation is performed, leaving it in the source
register.
@table @code
-@item afadd [dr + offset16], rs
-@itemx ???
+@item afadd [rd + offset16], rs
+@itemx rs = atomic_fetch_add ((u64 *)(rd + offset16), rs)
Atomic fetch-and-add instruction.
-@item afor [dr + offset16], rs
-@itemx ???
+@item afor [rd + offset16], rs
+@itemx rs = atomic_fetch_or ((u64 *)(rd + offset16), rs)
Atomic fetch-and-or instruction.
-@item afand [dr + offset16], rs
-@itemx ???
+@item afand [rd + offset16], rs
+@itemx rs = atomic_fetch_and ((u64 *)(rd + offset16), rs)
Atomic fetch-and-and instruction.
-@item afxor [dr + offset16], rs
-@itemx ???
-Atomic fetch-and-or instruction
+@item afxor [rd + offset16], rs
+@itemx rs = atomic_fetch_xor ((u64 *)(rd + offset16), rs)
+Atomic fetch-and-xor instruction.
@end table
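+
+@noindent
+As a brief illustration, the two lines below show the same @code{afadd}
+instruction in the normal and pseudo-C dialects.  It atomically adds
+@code{%r2} to the 64-bit word at @code{%r1 + 8} and leaves the value
+previously stored at that location in @code{%r2}; the operands are
+arbitrary example values:
+
+@example
+# r1, r2 and the offset 8 are arbitrary example operands.
+afadd [%r1+8], %r2
+r2 = atomic_fetch_add ((u64 *)(r1 + 8), r2)
+@end example
+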
The above instructions were introduced in V3 of the BPF ISA.  The
following variants operate on 32-bit words:
@table @code
@item aadd32 [rd + offset16], rs
-@itemx *(u32 *)(rd + offset16) = rs
+@itemx lock *(u32 *)(rd + offset16) += rs
Atomic add instruction.
@item aor32 [rd + offset16], rs
-@itemx *(u32 *) (rd + offset16) |= rs
+@itemx lock *(u32 *) (rd + offset16) |= rs
Atomic or instruction.
@item aand32 [rd + offset16], rs
-@itemx *(u32 *) (rd + offset16) &= rs
+@itemx lock *(u32 *) (rd + offset16) &= rs
Atomic and instruction.
@item axor32 [rd + offset16], rs
-@itemx *(u32 *) (rd + offset16) ^= rs
+@itemx lock *(u32 *) (rd + offset16) ^= rs
Atomic xor instruction.
@end table
@table @code
@item afadd32 [rd + offset16], rs
-@itemx ???
+@itemx ws = atomic_fetch_add ((u32 *)(rd + offset16), ws)
Atomic fetch-and-add instruction.
@item afor32 [rd + offset16], rs
-@itemx ???
+@itemx ws = atomic_fetch_or ((u32 *)(rd + offset16), ws)
Atomic fetch-and-or instruction.
@item afand32 [rd + offset16], rs
-@itemx ???
+@itemx ws = atomic_fetch_and ((u32 *)(rd + offset16), ws)
Atomic fetch-and-and instruction.
@item afxor32 [rd + offset16], rs
-@itemx ???
+@itemx ws = atomic_fetch_xor ((u32 *)(rd + offset16), ws)
Atomic fetch-and-xor instruction.
@end table
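+
+@noindent
+As a brief illustration, the 32-bit variants below operate on the
+32-bit word at the memory location; in the pseudo-C dialect the
+fetching forms use the @code{w} registers, which name the low 32 bits
+of the corresponding @code{r} registers.  The operands are arbitrary
+example values:
+
+@example
+# r1, r2/w2 and the offset 4 are arbitrary example operands.
+aadd32 [%r1+4], %r2
+lock *(u32 *)(r1 + 4) += r2
+afadd32 [%r1+4], %r2
+w2 = atomic_fetch_add ((u32 *)(r1 + 4), w2)
+@end example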
Disassembly of section .text:
0+ <.text>:
- 0: db 12 1e ef 00 00 00 00 \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 8: c3 12 1e ef 00 00 00 00 \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 0: db 12 1e ef 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
+ 8: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 10: db 12 1e ef 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
+ 18: c3 12 1e ef 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 20: db 12 1e ef 00 00 00 50 lock \*\(u64\*\)\(r1\+0x1eef\)\&=r2
+ 28: c3 12 1e ef 00 00 00 50 lock \*\(u32\*\)\(r1\+0x1eef\)\&=r2
+ 30: db 12 1e ef 00 00 00 40 lock \*\(u64\*\)\(r1\+0x1eef\)\|=r2
+ 38: c3 12 1e ef 00 00 00 40 lock \*\(u32\*\)\(r1\+0x1eef\)\|=r2
+ 40: db 12 1e ef 00 00 00 a0 lock \*\(u64\*\)\(r1\+0x1eef\)\^=r2
+ 48: c3 12 1e ef 00 00 00 a0 lock \*\(u32\*\)\(r1\+0x1eef\)\^=r2
+ 50: db 12 1e ef 00 00 00 01 r2=atomic_fetch_add\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 58: c3 12 1e ef 00 00 00 01 w2=atomic_fetch_add\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 60: db 12 1e ef 00 00 00 51 r2=atomic_fetch_and\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 68: c3 12 1e ef 00 00 00 51 w2=atomic_fetch_and\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 70: db 12 1e ef 00 00 00 41 r2=atomic_fetch_or\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 78: c3 12 1e ef 00 00 00 41 w2=atomic_fetch_or\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 80: db 12 1e ef 00 00 00 a1 r2=atomic_fetch_xor\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 88: c3 12 1e ef 00 00 00 a1 w2=atomic_fetch_xor\(\(u32\*\)\(r1\+0x1eef\),w2\)
#as: -EB -mdialect=normal
#source: atomic.s
-#objdump: -dr -M hex,v1
+#objdump: -dr -M hex
#name: eBPF atomic instructions, big endian
.*: +file format .*bpf.*
Disassembly of section .text:
0+ <.text>:
- 0: db 12 1e ef 00 00 00 00 xadddw \[%r1\+0x1eef\],%r2
- 8: c3 12 1e ef 00 00 00 00 xaddw \[%r1\+0x1eef\],%r2
+ 0: db 12 1e ef 00 00 00 00 aadd \[%r1\+0x1eef\],%r2
+ 8: c3 12 1e ef 00 00 00 00 aadd32 \[%r1\+0x1eef\],%r2
+ 10: db 12 1e ef 00 00 00 50 aand \[%r1\+0x1eef\],%r2
+ 18: c3 12 1e ef 00 00 00 50 aand32 \[%r1\+0x1eef\],%r2
+ 20: db 12 1e ef 00 00 00 40 aor \[%r1\+0x1eef\],%r2
+ 28: c3 12 1e ef 00 00 00 40 aor32 \[%r1\+0x1eef\],%r2
+ 30: db 12 1e ef 00 00 00 a0 axor \[%r1\+0x1eef\],%r2
+ 38: c3 12 1e ef 00 00 00 a0 axor32 \[%r1\+0x1eef\],%r2
+ 40: db 12 1e ef 00 00 00 01 afadd \[%r1\+0x1eef\],%r2
+ 48: c3 12 1e ef 00 00 00 01 afadd32 \[%r1\+0x1eef\],%r2
+ 50: db 12 1e ef 00 00 00 51 afand \[%r1\+0x1eef\],%r2
+ 58: c3 12 1e ef 00 00 00 51 afand32 \[%r1\+0x1eef\],%r2
+ 60: db 12 1e ef 00 00 00 41 afor \[%r1\+0x1eef\],%r2
+ 68: c3 12 1e ef 00 00 00 41 afor32 \[%r1\+0x1eef\],%r2
+ 70: db 12 1e ef 00 00 00 a1 afxor \[%r1\+0x1eef\],%r2
+ 78: c3 12 1e ef 00 00 00 a1 afxor32 \[%r1\+0x1eef\],%r2
Disassembly of section .text:
0+ <.text>:
- 0: db 21 ef 1e 00 00 00 00 \*\(u64\*\)\(r1\+0x1eef\)\+=r2
- 8: c3 21 ef 1e 00 00 00 00 \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 0: db 21 ef 1e 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
+ 8: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 10: db 21 ef 1e 00 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\+=r2
+ 18: c3 21 ef 1e 00 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\+=r2
+ 20: db 21 ef 1e 50 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\&=r2
+ 28: c3 21 ef 1e 50 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\&=r2
+ 30: db 21 ef 1e 40 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\|=r2
+ 38: c3 21 ef 1e 40 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\|=r2
+ 40: db 21 ef 1e a0 00 00 00 lock \*\(u64\*\)\(r1\+0x1eef\)\^=r2
+ 48: c3 21 ef 1e a0 00 00 00 lock \*\(u32\*\)\(r1\+0x1eef\)\^=r2
+ 50: db 21 ef 1e 01 00 00 00 r2=atomic_fetch_add\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 58: c3 21 ef 1e 01 00 00 00 w2=atomic_fetch_add\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 60: db 21 ef 1e 51 00 00 00 r2=atomic_fetch_and\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 68: c3 21 ef 1e 51 00 00 00 w2=atomic_fetch_and\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 70: db 21 ef 1e 41 00 00 00 r2=atomic_fetch_or\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 78: c3 21 ef 1e 41 00 00 00 w2=atomic_fetch_or\(\(u32\*\)\(r1\+0x1eef\),w2\)
+ 80: db 21 ef 1e a1 00 00 00 r2=atomic_fetch_xor\(\(u64\*\)\(r1\+0x1eef\),r2\)
+ 88: c3 21 ef 1e a1 00 00 00 w2=atomic_fetch_xor\(\(u32\*\)\(r1\+0x1eef\),w2\)
- # Test for eBPF ADDW and ADDDW pseudo-C instructions
+ # Test for eBPF atomic pseudo-C instructions.
.text
- *(u64 *)(r1 + 7919) += r2
- *(u32 *)(r1 + 7919) += r2
+ lock *(u64 *)(r1 + 0x1eef) += r2
+ lock *(u32 *)(r1 + 0x1eef) += r2
+ lock *(u64*)(r1+0x1eef)+=r2
+ lock *(u32*)(r1+0x1eef)+=r2
+ lock *(u64*)(r1+0x1eef)&=r2
+ lock *(u32*)(r1+0x1eef)&=r2
+ lock *(u64*)(r1+0x1eef)|=r2
+ lock *(u32*)(r1+0x1eef)|=r2
+ lock *(u64*)(r1+0x1eef)^=r2
+ lock *(u32*)(r1+0x1eef)^=r2
+ r2 = atomic_fetch_add((u64*)(r1+0x1eef),r2)
+ w2 = atomic_fetch_add((u32*)(r1+0x1eef),w2)
+ r2 = atomic_fetch_and((u64*)(r1+0x1eef),r2)
+ w2 = atomic_fetch_and((u32*)(r1+0x1eef),w2)
+ r2 = atomic_fetch_or((u64*)(r1+0x1eef),r2)
+ w2 = atomic_fetch_or((u32*)(r1+0x1eef),w2)
+ r2 = atomic_fetch_xor((u64*)(r1+0x1eef),r2)
+ w2 = atomic_fetch_xor((u32*)(r1+0x1eef),w2)
--- /dev/null
+#as: -EB -mdialect=normal
+#source: atomic-v1.s
+#objdump: -dr -M hex,v1
+#name: eBPF atomic instructions, big endian
+
+.*: +file format .*bpf.*
+
+Disassembly of section .text:
+
+0+ <.text>:
+ 0: db 12 1e ef 00 00 00 00 xadddw \[%r1\+0x1eef\],%r2
+ 8: c3 12 1e ef 00 00 00 00 xaddw \[%r1\+0x1eef\],%r2
--- /dev/null
+#as: -EL -mdialect=normal
+#source: atomic-v1.s
+#objdump: -dr -M hex,v1
+#name: eBPF atomic instructions, little endian
+
+.*: +file format .*bpf.*
+
+Disassembly of section .text:
+
+0+ <.text>:
+ 0: db 21 ef 1e 00 00 00 00 xadddw \[%r1\+0x1eef\],%r2
+ 8: c3 21 ef 1e 00 00 00 00 xaddw \[%r1\+0x1eef\],%r2
--- /dev/null
+
+ # Test for eBPF XADDW and XADDDW instructions.
+ .text
+ xadddw [%r1+0x1eef], %r2
+ xaddw [%r1+0x1eef], %r2
0+ <.text>:
0: db 21 ef 1e 00 00 00 00 aadd \[%r1\+0x1eef\],%r2
8: c3 21 ef 1e 00 00 00 00 aadd32 \[%r1\+0x1eef\],%r2
+ 10: db 21 ef 1e 50 00 00 00 aand \[%r1\+0x1eef\],%r2
+ 18: c3 21 ef 1e 50 00 00 00 aand32 \[%r1\+0x1eef\],%r2
+ 20: db 21 ef 1e 40 00 00 00 aor \[%r1\+0x1eef\],%r2
+ 28: c3 21 ef 1e 40 00 00 00 aor32 \[%r1\+0x1eef\],%r2
+ 30: db 21 ef 1e a0 00 00 00 axor \[%r1\+0x1eef\],%r2
+ 38: c3 21 ef 1e a0 00 00 00 axor32 \[%r1\+0x1eef\],%r2
+ 40: db 21 ef 1e 01 00 00 00 afadd \[%r1\+0x1eef\],%r2
+ 48: c3 21 ef 1e 01 00 00 00 afadd32 \[%r1\+0x1eef\],%r2
+ 50: db 21 ef 1e 51 00 00 00 afand \[%r1\+0x1eef\],%r2
+ 58: c3 21 ef 1e 51 00 00 00 afand32 \[%r1\+0x1eef\],%r2
+ 60: db 21 ef 1e 41 00 00 00 afor \[%r1\+0x1eef\],%r2
+ 68: c3 21 ef 1e 41 00 00 00 afor32 \[%r1\+0x1eef\],%r2
+ 70: db 21 ef 1e a1 00 00 00 afxor \[%r1\+0x1eef\],%r2
+ 78: c3 21 ef 1e a1 00 00 00 afxor32 \[%r1\+0x1eef\],%r2
- # Test for eBPF ADDW and ADDDW instructions
+ # Test for eBPF atomic instructions.
.text
aadd [%r1+0x1eef], %r2
aadd32 [%r1+0x1eef], %r2
-
+ aand [%r1+0x1eef], %r2
+ aand32 [%r1+0x1eef], %r2
+ aor [%r1+0x1eef], %r2
+ aor32 [%r1+0x1eef], %r2
+ axor [%r1+0x1eef], %r2
+ axor32 [%r1+0x1eef], %r2
+
+ afadd [%r1+0x1eef], %r2
+ afadd32 [%r1+0x1eef], %r2
+ afand [%r1+0x1eef], %r2
+ afand32 [%r1+0x1eef], %r2
+ afor [%r1+0x1eef], %r2
+ afor32 [%r1+0x1eef], %r2
+ afxor [%r1+0x1eef], %r2
+ afxor32 [%r1+0x1eef], %r2
run_dump_test jump-pseudoc
run_dump_test jump32
run_dump_test jump32-pseudoc
+ run_dump_test atomic-v1
run_dump_test atomic
run_dump_test atomic-pseudoc
run_dump_test indcall-1
run_dump_test jump-be-pseudoc
run_dump_test jump32-be
run_dump_test jump32-be-pseudoc
+ run_dump_test atomic-v1-be
run_dump_test atomic-be
run_dump_test atomic-be-pseudoc
}
BPF_V3, BPF_CODE, BPF_CLASS_JMP32|BPF_CODE_JNE|BPF_SRC_K},
/* Atomic instructions. */
- {BPF_INSN_AADD, "aadd%W[ %dr %o16 ] , %sr", "* ( u64 * ) ( %dr %o16 ) += %sr",
+ {BPF_INSN_AADD, "aadd%W[ %dr %o16 ] , %sr", "lock%w* ( u64 * ) ( %dr %o16 ) += %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AADD},
- {BPF_INSN_AOR, "aor%W[ %dr %o16 ] , %sr", "* ( u64 * ) ( %dr %o16 ) |= %sr",
+ {BPF_INSN_AOR, "aor%W[ %dr %o16 ] , %sr", "lock%w* ( u64 * ) ( %dr %o16 ) |= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AOR},
- {BPF_INSN_AAND, "aand%W[ %dr %o16 ] , %sr", "* ( u64 * ) ( %dr %o16 ) &= %sr",
+ {BPF_INSN_AAND, "aand%W[ %dr %o16 ] , %sr", "lock%w* ( u64 * ) ( %dr %o16 ) &= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AAND},
- {BPF_INSN_AXOR, "axor%W[ %dr %o16 ] , %sr", "* ( u64 * ) ( %dr %o16 ) ^= %sr",
+ {BPF_INSN_AXOR, "axor%W[ %dr %o16 ] , %sr", "lock%w* ( u64 * ) ( %dr %o16 ) ^= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AXOR},
/* Atomic instructions with fetching. */
- {BPF_INSN_AFADD, "afadd%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFADD, "afadd%W[ %dr %o16 ] , %sr", "%sr = atomic_fetch_add ( ( u64 * ) ( %dr %o16 ) , %sr )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AFADD},
- {BPF_INSN_AFOR, "afor%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFOR, "afor%W[ %dr %o16 ] , %sr", "%sr = atomic_fetch_or ( ( u64 * ) ( %dr %o16 ) , %sr )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AFOR},
- {BPF_INSN_AFAND, "afand%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFAND, "afand%W[ %dr %o16 ] , %sr", "%sr = atomic_fetch_and ( ( u64 * ) ( %dr %o16 ) , %sr )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AFAND},
- {BPF_INSN_AFXOR, "afxor%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFXOR, "afxor%W[ %dr %o16 ] , %sr", "%sr = atomic_fetch_xor ( ( u64 * ) ( %dr %o16 ) , %sr )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_DW|BPF_MODE_ATOMIC|BPF_IMM32_AFXOR},
/* Atomic instructions (32-bit). */
- {BPF_INSN_AADD32, "aadd32%W[ %dr %o16 ] , %sr", "* ( u32 * ) ( %dr %o16 ) += %sr",
+ {BPF_INSN_AADD32, "aadd32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) += %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AADD},
- {BPF_INSN_AOR32, "aor32%W[ %dr %o16 ] , %sr", "* ( u32 * ) ( %dr %o16 ) |= %sr",
+ {BPF_INSN_AOR32, "aor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) |= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AOR},
- {BPF_INSN_AAND32, "aand32%W[ %dr %o16 ] , %sr", "* ( u32 * ) ( %dr %o16 ) &= %sr",
+ {BPF_INSN_AAND32, "aand32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) &= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AAND},
- {BPF_INSN_AXOR32, "axor32%W[ %dr %o16 ] , %sr", "* ( u32 * ) ( %dr %o16 ) ^= %sr",
+ {BPF_INSN_AXOR32, "axor32%W[ %dr %o16 ] , %sr", "lock%w* ( u32 * ) ( %dr %o16 ) ^= %sr",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AXOR},
/* Atomic instructions with fetching (32-bit). */
- {BPF_INSN_AFADD32, "afadd32 %W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFADD32, "afadd32%W[ %dr %o16 ] , %sr", "%sw = atomic_fetch_add ( ( u32 * ) ( %dr %o16 ) , %sw )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AFADD},
- {BPF_INSN_AFOR32, "afor32%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFOR32, "afor32%W[ %dr %o16 ] , %sr", "%sw = atomic_fetch_or ( ( u32 * ) ( %dr %o16 ) , %sw )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AFOR},
- {BPF_INSN_AFAND32, "afand32%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFAND32, "afand32%W[ %dr %o16 ] , %sr", "%sw = atomic_fetch_and ( ( u32 * ) ( %dr %o16 ) , %sw )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AFAND},
- {BPF_INSN_AFXOR32, "afxor32%W[ %dr %o16 ] , %sr", "???",
+ {BPF_INSN_AFXOR32, "afxor32%W[ %dr %o16 ] , %sr", "%sw = atomic_fetch_xor ( ( u32 * ) ( %dr %o16 ) , %sw )",
BPF_V3, BPF_CODE|BPF_IMM32, BPF_CLASS_STX|BPF_SIZE_W|BPF_MODE_ATOMIC|BPF_IMM32_AFXOR},
/* Old versions of aadd and aadd32. */