; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -tail-dup-placement=0 | FileCheck %s

; Test memcpy, memmove, and memset intrinsics.

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)

; Test that return values are optimized.

; CHECK-LABEL: copy_yes:
; CHECK: i32.call $push0=, memcpy, $0, $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i8* @copy_yes(i8* %dst, i8* %src, i32 %len) {
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
  ret i8* %dst
}

; CHECK-LABEL: copy_no:
; CHECK: i32.call $drop=, memcpy, $0, $1, $2{{$}}
; CHECK-NEXT: return{{$}}
define void @copy_no(i8* %dst, i8* %src, i32 %len) {
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
  ret void
}

; CHECK-LABEL: move_yes:
; CHECK: i32.call $push0=, memmove, $0, $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i8* @move_yes(i8* %dst, i8* %src, i32 %len) {
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
  ret i8* %dst
}

; CHECK-LABEL: move_no:
; CHECK: i32.call $drop=, memmove, $0, $1, $2{{$}}
; CHECK-NEXT: return{{$}}
define void @move_no(i8* %dst, i8* %src, i32 %len) {
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
  ret void
}

; CHECK-LABEL: set_yes:
; CHECK: i32.call $push0=, memset, $0, $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i8* @set_yes(i8* %dst, i8 %src, i32 %len) {
  call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false)
  ret i8* %dst
}

; CHECK-LABEL: set_no:
; CHECK: i32.call $drop=, memset, $0, $1, $2{{$}}
; CHECK-NEXT: return{{$}}
define void @set_no(i8* %dst, i8 %src, i32 %len) {
  call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false)
  ret void
}

; CHECK-LABEL: frame_index:
; CHECK: i32.call $drop=, memset, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK: i32.call $push{{[0-9]+}}=, memset, ${{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}{{$}}
; CHECK: return{{$}}
define void @frame_index() {
entry:
  %a = alloca [2048 x i8], align 16
  %b = alloca [2048 x i8], align 16
  %0 = getelementptr inbounds [2048 x i8], [2048 x i8]* %a, i32 0, i32 0
  %1 = getelementptr inbounds [2048 x i8], [2048 x i8]* %b, i32 0, i32 0
  call void @llvm.memset.p0i8.i32(i8* align 16 %0, i8 256, i32 1024, i1 false)
  call void @llvm.memset.p0i8.i32(i8* align 16 %1, i8 256, i32 1024, i1 false)
  ret void
}

; If the result value of memset doesn't get stackified, it should be marked
; $drop. Note that we use a call to prevent tail dup so that we can test
; this specific functionality.

; CHECK-LABEL: drop_result:
; CHECK: i32.call $drop=, memset, $0, $1, $2
declare i8* @def()
declare void @block_tail_dup()
define i8* @drop_result(i8* %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
bb:
  %tmp = icmp eq i32 %arg3, 0
  br i1 %tmp, label %bb5, label %bb9

bb5:
  %tmp6 = icmp eq i32 %arg4, 0
  br i1 %tmp6, label %bb7, label %bb8

bb7:
  call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false)
  br label %bb11

bb8:
  br label %bb11

bb9:
  %tmp10 = call i8* @def()
  br label %bb11

bb11:
  %tmp12 = phi i8* [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
  call void @block_tail_dup()
  ret i8* %tmp12
}

; This is the same as drop_result, except we let tail dup happen, so the
; result of the memset *is* stackified.

; CHECK-LABEL: tail_dup_to_reuse_result:
; CHECK: i32.call $push{{[0-9]+}}=, memset, $0, $1, $2
define i8* @tail_dup_to_reuse_result(i8* %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
bb:
  %tmp = icmp eq i32 %arg3, 0
  br i1 %tmp, label %bb5, label %bb9

bb5:
  %tmp6 = icmp eq i32 %arg4, 0
  br i1 %tmp6, label %bb7, label %bb8

bb7:
  call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false)
  br label %bb11

bb8:
  br label %bb11

bb9:
  %tmp10 = call i8* @def()
  br label %bb11

bb11:
  %tmp12 = phi i8* [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
  ret i8* %tmp12
}