1 ; RUN: llc < %s -march=bpfel -verify-machineinstrs -bpf-expand-memcpy-in-order | FileCheck %s
2 ; RUN: llc < %s -march=bpfeb -verify-machineinstrs -bpf-expand-memcpy-in-order | FileCheck %s
6 ; void cal_align1(void *a, void *b)
8 ; __builtin_memcpy(a, b, COPY_LEN);
11 ; void cal_align2(short *a, short *b)
13 ; __builtin_memcpy(a, b, COPY_LEN);
18 ; void cal_align4(int *a, int *b)
20 ; __builtin_memcpy(a, b, COPY_LEN);
25 ; void cal_align8(long long *a, long long *b)
27 ; __builtin_memcpy(a, b, COPY_LEN);
30 ; Function Attrs: nounwind
; Align-1 source/dest, length 9: the in-order expansion can only use byte
; accesses, so we expect nine u8 load/store pairs at offsets 0..8, emitted
; strictly in ascending offset order (that ordering is the point of the
; -bpf-expand-memcpy-in-order flag).
; NOTE(review): the entry label, `ret void`, and closing `}` are elided in
; this excerpt of the test file — code lines below are preserved verbatim.
31 define dso_local void @cal_align1(i8* nocapture %a, i8* nocapture readonly %b) local_unnamed_addr #0 {
33 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %a, i8* align 1 %b, i64 9, i1 false)
37 ; Function Attrs: argmemonly nounwind
38 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
; The first CHECK pair binds the scratch, source, and destination registers;
; every later pair reuses those bindings, so all nine copies must go through
; the same three registers and touch offsets 0 through 8 in order.
40 ; CHECK: [[SCRATCH_REG:r[0-9]]] = *(u8 *)([[SRC_REG:r[0-9]]] + 0)
41 ; CHECK: *(u8 *)([[DST_REG:r[0-9]]] + 0) = [[SCRATCH_REG]]
42 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 1)
43 ; CHECK: *(u8 *)([[DST_REG]] + 1) = [[SCRATCH_REG]]
44 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 2)
45 ; CHECK: *(u8 *)([[DST_REG]] + 2) = [[SCRATCH_REG]]
46 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 3)
47 ; CHECK: *(u8 *)([[DST_REG]] + 3) = [[SCRATCH_REG]]
48 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 4)
49 ; CHECK: *(u8 *)([[DST_REG]] + 4) = [[SCRATCH_REG]]
50 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 5)
51 ; CHECK: *(u8 *)([[DST_REG]] + 5) = [[SCRATCH_REG]]
52 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 6)
53 ; CHECK: *(u8 *)([[DST_REG]] + 6) = [[SCRATCH_REG]]
54 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 7)
55 ; CHECK: *(u8 *)([[DST_REG]] + 7) = [[SCRATCH_REG]]
56 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 8)
57 ; CHECK: *(u8 *)([[DST_REG]] + 8) = [[SCRATCH_REG]]
59 ; Function Attrs: nounwind
; Align-2 source/dest, length 9: expect four u16 copies covering bytes 0..7,
; then a single u8 copy for the trailing odd byte at offset 8 — widest legal
; access first, remainder handled with narrower accesses, all in order.
; NOTE(review): entry label / `ret void` / `}` elided in this excerpt —
; code lines below are preserved verbatim.
60 define dso_local void @cal_align2(i16* nocapture %a, i16* nocapture readonly %b) local_unnamed_addr #0 {
62 %0 = bitcast i16* %a to i8*
63 %1 = bitcast i16* %b to i8*
64 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %0, i8* align 2 %1, i64 9, i1 false)
; Register bindings are captured fresh here (new [[...]] definitions), so this
; group matches independently of the cal_align1 checks above.
67 ; CHECK: [[SCRATCH_REG:r[0-9]]] = *(u16 *)([[SRC_REG:r[0-9]]] + 0)
68 ; CHECK: *(u16 *)([[DST_REG:r[0-9]]] + 0) = [[SCRATCH_REG]]
69 ; CHECK: [[SCRATCH_REG]] = *(u16 *)([[SRC_REG]] + 2)
70 ; CHECK: *(u16 *)([[DST_REG]] + 2) = [[SCRATCH_REG]]
71 ; CHECK: [[SCRATCH_REG]] = *(u16 *)([[SRC_REG]] + 4)
72 ; CHECK: *(u16 *)([[DST_REG]] + 4) = [[SCRATCH_REG]]
73 ; CHECK: [[SCRATCH_REG]] = *(u16 *)([[SRC_REG]] + 6)
74 ; CHECK: *(u16 *)([[DST_REG]] + 6) = [[SCRATCH_REG]]
75 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 8)
76 ; CHECK: *(u8 *)([[DST_REG]] + 8) = [[SCRATCH_REG]]
78 ; Function Attrs: nounwind
; Align-4 source/dest, length 19 = 4*4 + 2 + 1: expect four u32 copies for
; bytes 0..15, one u16 copy at offset 16, and one u8 copy at offset 18 —
; the remainder is split into progressively narrower accesses, in order.
; NOTE(review): entry label / `ret void` / `}` elided in this excerpt —
; code lines below are preserved verbatim.
79 define dso_local void @cal_align4(i32* nocapture %a, i32* nocapture readonly %b) local_unnamed_addr #0 {
81 %0 = bitcast i32* %a to i8*
82 %1 = bitcast i32* %b to i8*
83 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 19, i1 false)
; Fresh register captures for this function's check group.
86 ; CHECK: [[SCRATCH_REG:r[0-9]]] = *(u32 *)([[SRC_REG:r[0-9]]] + 0)
87 ; CHECK: *(u32 *)([[DST_REG:r[0-9]]] + 0) = [[SCRATCH_REG]]
88 ; CHECK: [[SCRATCH_REG]] = *(u32 *)([[SRC_REG]] + 4)
89 ; CHECK: *(u32 *)([[DST_REG]] + 4) = [[SCRATCH_REG]]
90 ; CHECK: [[SCRATCH_REG]] = *(u32 *)([[SRC_REG]] + 8)
91 ; CHECK: *(u32 *)([[DST_REG]] + 8) = [[SCRATCH_REG]]
92 ; CHECK: [[SCRATCH_REG]] = *(u32 *)([[SRC_REG]] + 12)
93 ; CHECK: *(u32 *)([[DST_REG]] + 12) = [[SCRATCH_REG]]
94 ; CHECK: [[SCRATCH_REG]] = *(u16 *)([[SRC_REG]] + 16)
95 ; CHECK: *(u16 *)([[DST_REG]] + 16) = [[SCRATCH_REG]]
96 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 18)
97 ; CHECK: *(u8 *)([[DST_REG]] + 18) = [[SCRATCH_REG]]
99 ; Function Attrs: nounwind
; Align-8 source/dest, length 27 = 3*8 + 2 + 1: expect three u64 copies for
; bytes 0..23, then a u16 at offset 24 and a u8 at offset 26.
; NOTE(review): the checks go straight from u64 to u16 — no u32 access for
; the 3-byte tail; presumably intentional in the expansion, matching the
; upstream expected output. Entry label / `ret void` / `}` elided in this
; excerpt — code lines below are preserved verbatim.
100 define dso_local void @cal_align8(i64* nocapture %a, i64* nocapture readonly %b) local_unnamed_addr #0 {
102 %0 = bitcast i64* %a to i8*
103 %1 = bitcast i64* %b to i8*
104 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 27, i1 false)
; Fresh register captures for this function's check group.
107 ; CHECK: [[SCRATCH_REG:r[0-9]]] = *(u64 *)([[SRC_REG:r[0-9]]] + 0)
108 ; CHECK: *(u64 *)([[DST_REG:r[0-9]]] + 0) = [[SCRATCH_REG]]
109 ; CHECK: [[SCRATCH_REG]] = *(u64 *)([[SRC_REG]] + 8)
110 ; CHECK: *(u64 *)([[DST_REG]] + 8) = [[SCRATCH_REG]]
111 ; CHECK: [[SCRATCH_REG]] = *(u64 *)([[SRC_REG]] + 16)
112 ; CHECK: *(u64 *)([[DST_REG]] + 16) = [[SCRATCH_REG]]
113 ; CHECK: [[SCRATCH_REG]] = *(u16 *)([[SRC_REG]] + 24)
114 ; CHECK: *(u16 *)([[DST_REG]] + 24) = [[SCRATCH_REG]]
115 ; CHECK: [[SCRATCH_REG]] = *(u8 *)([[SRC_REG]] + 26)
116 ; CHECK: *(u8 *)([[DST_REG]] + 26) = [[SCRATCH_REG]]