# RUN: not llvm-mc -mcpu=v4 -triple bpfel < %s 2>&1 \
# RUN:   | grep 'error: operand is not an identifier or 16-bit signed integer' \
# RUN: not llvm-mc -mcpu=v4 -triple bpfel < %s 2>&1 \
# RUN:   | grep 'error: operand is not a 16-bit signed integer' \
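
# Check that the assembler rejects memory-instruction offsets that do not fit
# in the 16-bit signed offset field (+/-70000 is outside [-32768, 32767]).

# Stores of an immediate with an out-of-range offset.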
*(u64 *)(r1 + 70000) = 10
*(u32 *)(r1 - 70000) = 10
*(u16 *)(r1 - 70000) = 10
*(u8 *)(r1 - 70000) = 10
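
# Stores of a register with an out-of-range offset.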
*(u64 *)(r1 + 70000) = r1
*(u32 *)(r1 - 70000) = r1
*(u16 *)(r1 - 70000) = r1
*(u8 *)(r1 - 70000) = r1
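
# Loads with an out-of-range offset.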
r1 = *(u64 *)(r1 + 70000)
r1 = *(u32 *)(r1 - 70000)
r1 = *(u16 *)(r1 - 70000)
r1 = *(u8 *)(r1 - 70000)
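
# Sign-extending loads with an out-of-range offset.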
r1 = *(s32 *)(r1 + 70000)
r1 = *(s16 *)(r1 - 70000)
r1 = *(s8 *)(r1 - 70000)
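
# Locked (atomic) read-modify-write operations with an out-of-range offset.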
lock *(u32 *)(r1 + 70000) += w2
lock *(u32 *)(r1 - 70000) &= w2
lock *(u32 *)(r1 - 70000) |= w2
lock *(u32 *)(r1 - 70000) ^= w2
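
# Atomic fetch operations with an out-of-range offset.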
r0 = atomic_fetch_add((u64 *)(r1 + 70000), r0)
r0 = atomic_fetch_and((u64 *)(r1 + 70000), r0)
r0 = atomic_fetch_xor((u64 *)(r1 + 70000), r0)
r0 = atomic_fetch_or((u64 *)(r1 + 70000), r0)
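
# Compare-and-exchange operations with an out-of-range offset.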
w0 = cmpxchg32_32(r1 + 70000, w0, w1)
r0 = cmpxchg_64(r1 + 70000, r0, r1)