1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=powerpc64-- -mcpu=pwr5 -verify-machineinstrs < %s | \
; Round-trip test: load i64, bswap, store back. On pwr5 there is no 64-bit
; ldbrx/stdbrx, so per the CHECK lines the bswap'd load is lowered as two
; byte-reversed word loads (lwbrx) whose halves are merged with rldimi
; before the 64-bit store.
; NOTE(review): the CHECK lines are autogenerated by
; utils/update_llc_test_checks.py — regenerate them rather than hand-editing.
4 define void @bs(ptr %p) {
8 ; CHECK-NEXT: lwbrx 5, 0, 3
9 ; CHECK-NEXT: lwbrx 4, 3, 4
10 ; CHECK-NEXT: rldimi 5, 4, 32, 0
11 ; CHECK-NEXT: std 5, 0(3)
13 %x = load i64, ptr %p, align 8
14 %b = call i64 @llvm.bswap.i64(i64 %x)
15 store i64 %b, ptr %p, align 8
; Volatile case: a volatile i64 load must NOT be split into two lwbrx
; word loads (that would change the access pattern), so per the CHECK
; lines a single 64-bit ld is kept and the byte swap is performed in
; registers with a rotldi/rldimi sequence instead.
; NOTE(review): CHECK lines are autogenerated — regenerate, don't hand-edit.
19 define i64 @volatile_ld(ptr %p) {
20 ; CHECK-LABEL: volatile_ld:
22 ; CHECK-NEXT: ld 4, 0(3)
23 ; CHECK-NEXT: rotldi 5, 4, 16
24 ; CHECK-NEXT: rotldi 3, 4, 8
25 ; CHECK-NEXT: rldimi 3, 5, 8, 48
26 ; CHECK-NEXT: rotldi 5, 4, 24
27 ; CHECK-NEXT: rldimi 3, 5, 16, 40
28 ; CHECK-NEXT: rotldi 5, 4, 32
29 ; CHECK-NEXT: rldimi 3, 5, 24, 32
30 ; CHECK-NEXT: rotldi 5, 4, 48
31 ; CHECK-NEXT: rldimi 3, 5, 40, 16
32 ; CHECK-NEXT: rotldi 5, 4, 56
33 ; CHECK-NEXT: rldimi 3, 5, 48, 8
34 ; CHECK-NEXT: rldimi 3, 4, 56, 0
36 %x = load volatile i64, ptr %p, align 8
37 %b = call i64 @llvm.bswap.i64(i64 %x)
; Misaligned case: an align-1 (non-volatile) i64 load may still be split,
; so per the CHECK lines it is lowered the same way as the aligned case —
; two lwbrx byte-reversed word loads merged with rldimi.
; NOTE(review): CHECK lines are autogenerated — regenerate, don't hand-edit.
41 define i64 @misaligned_ld(ptr %p) {
42 ; CHECK-LABEL: misaligned_ld:
45 ; CHECK-NEXT: lwbrx 4, 3, 4
46 ; CHECK-NEXT: lwbrx 3, 0, 3
47 ; CHECK-NEXT: rldimi 3, 4, 32, 0
49 %x = load i64, ptr %p, align 1
50 %b = call i64 @llvm.bswap.i64(i64 %x)
; Intrinsic declaration: 64-bit byte-swap used by all tests above.
54 declare i64 @llvm.bswap.i64(i64) #2