1 ; RUN: llvm-as < %s | llc -march=xcore > %t1.s
2 ; RUN: grep "bl __misaligned_load" %t1.s | count 1
3 ; RUN: grep ld16s %t1.s | count 2
4 ; RUN: grep ldw %t1.s | count 2
5 ; RUN: grep shl %t1.s | count 2
6 ; RUN: grep shr %t1.s | count 1
7 ; RUN: grep zext %t1.s | count 1
8 ; RUN: grep "or " %t1.s | count 2
10 ; Byte-aligned load. Expands to a call to __misaligned_load.
11 define i32 @align1(i32* %p) nounwind {
13 %0 = load i32* %p, align 1 ; <i32> [#uses=1]
17 ; Half-word-aligned load. Expands to two 16-bit loads.
18 define i32 @align2(i32* %p) nounwind {
20 %0 = load i32* %p, align 2 ; <i32> [#uses=1]
; Word-aligned 5-byte buffer; align3 below performs an i32 load from byte
; offset 1 into it (align 1), i.e. a misaligned access at a constant offset
; from a word-aligned base.
24 @a = global [5 x i8] zeroinitializer, align 4
26 ; Constant offset from a word-aligned base. Expands to two 32-bit loads.
27 define i32 @align3() nounwind {
29 %0 = load i32* bitcast (i8* getelementptr ([5 x i8]* @a, i32 0, i32 1) to i32*), align 1