; RUN: llc -aarch64-sve-vector-bits-min=128 -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
; RUN: llc -aarch64-sve-vector-bits-min=256 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
; RUN: llc -aarch64-sve-vector-bits-min=384 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
; RUN: llc -aarch64-sve-vector-bits-min=512 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=640 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=768 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=896 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512

target triple = "aarch64-unknown-linux-gnu"

; Don't use SVE when its registers are no bigger than NEON.
; NO_SVE-NOT: ptrue

;
; ST1B
;

define void @masked_scatter_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2i8:
; CHECK: ldrb [[VALS_LO:w[0-9]+]], [x0]
; CHECK-NEXT: ldrb [[VALS_HI:w[0-9]+]], [x0, #1]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
; CHECK-NEXT: st1b { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x i8>, <2 x i8>* %a
  %ptrs = load <2 x i8*>, <2 x i8*>* %b
  %mask = icmp eq <2 x i8> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2i8(<2 x i8> %vals, <2 x i8*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4i8:
; CHECK: ldr s[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK-NEXT: ushll [[SHL:v[0-9]+]].8h, v[[VALS]].8b, #0
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, [[SHL]].4h, #0
; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; CHECK-NEXT: uunpklo z[[UPK2:[0-9]+]].d, [[UPK1]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, z[[UPK2]].d, #0
; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; CHECK-NEXT: st1b { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x i8>, <4 x i8>* %a
  %ptrs = load <4 x i8*>, <4 x i8*>* %b
  %mask = icmp eq <4 x i8> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4i8(<4 x i8> %vals, <4 x i8*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8i8:
; VBITS_GE_512: ldr d[[VALS:[0-9]+]], [x0]
; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8b, v[[VALS]].8b, #0
; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, z[[VALS]].b
; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
; VBITS_GE_512-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
; VBITS_GE_512-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
; VBITS_GE_512-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret

; Ensure sensible type legalisation.
; VBITS_EQ_256-DAG: ldr d[[VALS:[0-9]+]], [x0]
; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
; VBITS_EQ_256-DAG: cmeq [[ZMSK:v[0-9]+]].8b, v[[VALS]].8b, #0
; VBITS_EQ_256-DAG: zip1 v[[VAL_LO:[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
; VBITS_EQ_256-DAG: zip2 v[[VAL_HI:[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
; VBITS_EQ_256-DAG: shl [[SHL_LO:v[0-9]+]].4h, v[[VAL_LO]].4h, #8
; VBITS_EQ_256-DAG: shl [[SHL_HI:v[0-9]+]].4h, v[[VAL_HI]].4h, #8
; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
; VBITS_EQ_256-DAG: sshr v[[SSHR_LO:[0-9]+]].4h, [[SHL_LO]].4h, #8
; VBITS_EQ_256-DAG: sshr v[[SSHR_HI:[0-9]+]].4h, [[SHL_HI]].4h, #8
; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VAL_LO]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[VAL_HI]].h
; VBITS_EQ_256-DAG: uunpklo z[[UPK2_LO:[0-9]+]].d, [[UPK1_LO]].s
; VBITS_EQ_256-DAG: uunpklo z[[UPK2_HI:[0-9]+]].d, [[UPK1_HI]].s
; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, z[[UPK2_LO]].d, #0
; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, z[[UPK2_HI]].d, #0
; VBITS_EQ_256-DAG: zip1 v[[VALS2_LO:[0-9]+]].8b, v[[VALS]].8b, v[[VALS]].8b
; VBITS_EQ_256-DAG: zip2 v[[VALS2_HI:[0-9]+]].8b, v[[VALS]].8b, v[[VALS]].8b
; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VALS2_LO]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[VALS2_HI]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
; VBITS_EQ_256-DAG: st1b { [[UPK2_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
; VBITS_EQ_256-DAG: st1b { [[UPK2_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
; VBITS_EQ_256-NEXT: ret
  %vals = load <8 x i8>, <8 x i8>* %a
  %ptrs = load <8 x i8*>, <8 x i8*>* %b
  %mask = icmp eq <8 x i8> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8i8(<8 x i8> %vals, <8 x i8*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16i8:
; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0]
; VBITS_GE_1024-NEXT: ptrue [[PG:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_GE_1024-NEXT: cmeq v[[CMP:[0-9]+]].16b, v[[VALS]].16b, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, z[[VALS]].b
; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
; VBITS_GE_1024-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x i8>, <16 x i8>* %a
  %ptrs = load <16 x i8*>, <16 x i8*>* %b
  %mask = icmp eq <16 x i8> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16i8(<16 x i8> %vals, <16 x i8*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32i8:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].b, vl32
; VBITS_GE_2048-NEXT: ld1b { [[VALS:z[0-9]+]].b }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].b, [[PG0]]/z, [[VALS]].b, #0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].b, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].h, [[MONE]].b
; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, [[VALS]].b
; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK3]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
; VBITS_GE_2048-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x i8>, <32 x i8>* %a
  %ptrs = load <32 x i8*>, <32 x i8*>* %b
  %mask = icmp eq <32 x i8> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32i8(<32 x i8> %vals, <32 x i8*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1H
;

define void @masked_scatter_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2i16:
; CHECK: ldrh [[VALS_LO:w[0-9]+]], [x0]
; CHECK-NEXT: ldrh [[VALS_HI:w[0-9]+]], [x0, #2]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
; CHECK-NEXT: st1h { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x i16>, <2 x i16>* %a
  %ptrs = load <2 x i16*>, <2 x i16*>* %b
  %mask = icmp eq <2 x i16> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2i16(<2 x i16> %vals, <2 x i16*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4i16:
; CHECK: ldr d[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; CHECK-NEXT: uunpklo z[[UPK2:[0-9]+]].d, [[UPK1]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, z[[UPK2]].d, #0
; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x i16>, <4 x i16>* %a
  %ptrs = load <4 x i16*>, <4 x i16*>* %b
  %mask = icmp eq <4 x i16> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4i16(<4 x i16> %vals, <4 x i16*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8i16:
; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_512-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret

; Ensure sensible type legalisation.
; VBITS_EQ_256-DAG: ldr q[[VALS:[0-9]+]], [x0]
; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
; VBITS_EQ_256-DAG: cmeq v[[ZMSK:[0-9]+]].8h, v[[VALS]].8h, #0
; VBITS_EQ_256-DAG: ext v[[EXT:[0-9]+]].16b, v[[VALS]].16b, v[[VALS]].16b, #8
; VBITS_EQ_256-DAG: ext v[[ZEXT:[0-9]+]].16b, v[[ZMSK]].16b, v[[ZMSK]].16b, #8
; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[ZMSK]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[ZEXT]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, [[UPK2_LO]].d, #0
; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, [[UPK2_HI]].d, #0
; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VALS]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[EXT]].h
; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
; VBITS_EQ_256-DAG: st1h { [[UPK2_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
; VBITS_EQ_256-DAG: st1h { [[UPK2_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
; VBITS_EQ_256-NEXT: ret
  %vals = load <8 x i16>, <8 x i16>* %a
  %ptrs = load <8 x i16*>, <8 x i16*>* %b
  %mask = icmp eq <8 x i16> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8i16(<8 x i16> %vals, <8 x i16*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16i16:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_1024-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x i16>, <16 x i16>* %a
  %ptrs = load <16 x i16*>, <16 x i16*>* %b
  %mask = icmp eq <16 x i16> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16i16(<16 x i16> %vals, <16 x i16*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32i16:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x i16>, <32 x i16>* %a
  %ptrs = load <32 x i16*>, <32 x i16*>* %b
  %mask = icmp eq <32 x i16> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32i16(<32 x i16> %vals, <32 x i16*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1W
;

define void @masked_scatter_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2i32:
; CHECK: ldr d[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
; CHECK-NEXT: st1w { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x i32>, <2 x i32>* %a
  %ptrs = load <2 x i32*>, <2 x i32*>* %b
  %mask = icmp eq <2 x i32> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2i32(<2 x i32> %vals, <2 x i32*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4i32:
; CHECK: ldr q[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
; CHECK-NEXT: uunpklo [[UPKV:z[0-9]+]].d, z[[VALS]].s
; CHECK-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x i32>, <4 x i32>* %a
  %ptrs = load <4 x i32*>, <4 x i32*>* %b
  %mask = icmp eq <4 x i32> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4i32(<4 x i32> %vals, <4 x i32*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8i32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_512-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_512-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret

; Ensure sensible type legalisation.
; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].s, vl8
; VBITS_EQ_256-DAG: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG1]]/z, [x1, x[[NUMELTS]], lsl #3]
; VBITS_EQ_256-DAG: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
; VBITS_EQ_256-DAG: add x8, sp, #32
; VBITS_EQ_256-DAG: mov x9, sp
; VBITS_EQ_256-DAG: mov [[MONE:z[0-9]+]].s, [[MASK]]/z, #-1
; VBITS_EQ_256-DAG: st1w { [[MONE]].s }, [[PG0]], [x8]
; VBITS_EQ_256-DAG: st1w { [[VALS]].s }, [[PG0]], [x9]
; VBITS_EQ_256-DAG: ldr q[[CMP_LO:[0-9]+]], [sp, #32]
; VBITS_EQ_256-DAG: ldr q[[VAL_LO:[0-9]+]], [sp]
; VBITS_EQ_256-DAG: uunpklo [[UPKC_LO:z[0-9]+]].d, z[[CMP_LO]].s
; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG1]]/z, [[UPKC_LO]].d, #0
; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].d, z[[VAL_LO]].s
; VBITS_EQ_256-DAG: st1w { [[UPK1_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
; VBITS_EQ_256-DAG: ldr q[[CMP_HI:[0-9]+]], [sp, #48]
; VBITS_EQ_256-DAG: ldr q[[VAL_HI:[0-9]+]], [sp, #16]
; VBITS_EQ_256-DAG: uunpklo [[UPKC_HI:z[0-9]+]].d, z[[CMP_HI]].s
; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG1]]/z, [[UPKC_HI]].d, #0
; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].d, z[[VAL_HI]].s
; VBITS_EQ_256-DAG: st1w { [[UPK1_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
  %vals = load <8 x i32>, <8 x i32>* %a
  %ptrs = load <8 x i32*>, <8 x i32*>* %b
  %mask = icmp eq <8 x i32> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8i32(<8 x i32> %vals, <8 x i32*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16i32:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_1024-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x i32>, <16 x i32>* %a
  %ptrs = load <16 x i32*>, <16 x i32*>* %b
  %mask = icmp eq <16 x i32> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16i32(<16 x i32> %vals, <16 x i32*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32i32:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x i32>, <32 x i32>* %a
  %ptrs = load <32 x i32*>, <32 x i32*>* %b
  %mask = icmp eq <32 x i32> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32i32(<32 x i32> %vals, <32 x i32*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1D
;

; Scalarize 1 x i64 scatters
define void @masked_scatter_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v1i64:
  %vals = load <1 x i64>, <1 x i64>* %a
  %ptrs = load <1 x i64*>, <1 x i64*>* %b
  %mask = icmp eq <1 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v1i64(<1 x i64> %vals, <1 x i64*> %ptrs, i32 8, <1 x i1> %mask)
  ret void
}

define void @masked_scatter_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2i64:
; CHECK: ldr q[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x i64>, <2 x i64>* %a
  %ptrs = load <2 x i64*>, <2 x i64*>* %b
  %mask = icmp eq <2 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2i64(<2 x i64> %vals, <2 x i64*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4i64:
; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; CHECK-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x i64>, <4 x i64>* %a
  %ptrs = load <4 x i64*>, <4 x i64*>* %b
  %mask = icmp eq <4 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4i64(<4 x i64> %vals, <4 x i64*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8i64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret

; Ensure sensible type legalisation.
; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].d, vl4
; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
; VBITS_EQ_256-DAG: ld1d { [[VALS_LO:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_EQ_256-DAG: ld1d { [[VALS_HI:z[0-9]+]].d }, [[PG0]]/z, [x0, x[[NUMELTS]], lsl #3]
; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG0]]/z, [x1, x[[NUMELTS]], lsl #3]
; VBITS_EQ_256-DAG: cmpeq [[MASK_LO:p[0-9]+]].d, [[PG0]]/z, [[VALS_LO]].d, #0
; VBITS_EQ_256-DAG: cmpeq [[MASK_HI:p[0-9]+]].d, [[PG0]]/z, [[VALS_HI]].d, #0
; VBITS_EQ_256-DAG: st1d { [[VALS_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
; VBITS_EQ_256-DAG: st1d { [[VALS_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
; VBITS_EQ_256-NEXT: ret
  %vals = load <8 x i64>, <8 x i64>* %a
  %ptrs = load <8 x i64*>, <8 x i64*>* %b
  %mask = icmp eq <8 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8i64(<8 x i64> %vals, <8 x i64*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16i64:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x i64>, <16 x i64>* %a
  %ptrs = load <16 x i64*>, <16 x i64*>* %b
  %mask = icmp eq <16 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16i64(<16 x i64> %vals, <16 x i64*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32i64:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x i64>, <32 x i64>* %a
  %ptrs = load <32 x i64*>, <32 x i64*>* %b
  %mask = icmp eq <32 x i64> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32i64(<32 x i64> %vals, <32 x i64*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1H (half)
;

define void @masked_scatter_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2f16:
; CHECK: ldr s[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: movi d2, #0000000000000000
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0
; CHECK-NEXT: umov w8, v[[CMP]].h[0]
; CHECK-NEXT: umov w9, v[[CMP]].h[1]
; CHECK-NEXT: fmov s[[CMP]], w8
; CHECK-NEXT: mov v[[CMP]].s[1], w9
; CHECK-NEXT: shl v[[CMP]].2s, v[[CMP]].2s, #16
; CHECK-NEXT: sshr v[[CMP]].2s, v[[CMP]].2s, #16
; CHECK-NEXT: fmov w9, s[[CMP]]
; CHECK-NEXT: mov w8, v[[CMP]].s[1]
; CHECK-NEXT: mov v[[NCMP:[0-9]+]].h[0], w9
; CHECK-NEXT: mov v[[NCMP]].h[1], w8
; CHECK-NEXT: shl v[[NCMP]].4h, v[[NCMP]].4h, #15
; CHECK-NEXT: sshr v[[NCMP]].4h, v[[NCMP]].4h, #15
; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[NCMP]].h
; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, [[UPK2]].d, #0
; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x half>, <2 x half>* %a
  %ptrs = load <2 x half*>, <2 x half*>* %b
  %mask = fcmp oeq <2 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2f16(<2 x half> %vals, <2 x half*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4f16:
; CHECK: ldr d[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0
; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x half>, <4 x half>* %a
  %ptrs = load <4 x half*>, <4 x half*>* %b
  %mask = fcmp oeq <4 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4f16(<4 x half> %vals, <4 x half*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8f16:
; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; VBITS_GE_512-NEXT: fcmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0.0
; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_512-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret
  %vals = load <8 x half>, <8 x half>* %a
  %ptrs = load <8 x half*>, <8 x half*>* %b
  %mask = fcmp oeq <8 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8f16(<8 x half> %vals, <8 x half*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16f16:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_1024-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x half>, <16 x half>* %a
  %ptrs = load <16 x half*>, <16 x half*>* %b
  %mask = fcmp oeq <16 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16f16(<16 x half> %vals, <16 x half*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32f16:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x half>, <32 x half>* %a
  %ptrs = load <32 x half*>, <32 x half*>* %b
  %mask = fcmp oeq <32 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1W (float)
;

define void @masked_scatter_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2f32:
; CHECK: ldr d[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0.0
; CHECK-NEXT: ushll v[[SHLC:[0-9]+]].2d, v[[CMP]].2s, #0
; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHLC]].d, #0
; CHECK-NEXT: st1w { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x float>, <2 x float>* %a
  %ptrs = load <2 x float*>, <2 x float*>* %b
  %mask = fcmp oeq <2 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2f32(<2 x float> %vals, <2 x float*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4f32:
; CHECK: ldr q[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0.0
; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
; CHECK-NEXT: uunpklo [[UPKV:z[0-9]+]].d, z[[VALS]].s
; CHECK-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x float>, <4 x float>* %a
  %ptrs = load <4 x float*>, <4 x float*>* %b
  %mask = fcmp oeq <4 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4f32(<4 x float> %vals, <4 x float*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8f32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_512-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_512-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret
  %vals = load <8 x float>, <8 x float>* %a
  %ptrs = load <8 x float*>, <8 x float*>* %b
  %mask = fcmp oeq <8 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8f32(<8 x float> %vals, <8 x float*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16f32:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_1024-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_1024-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x float>, <16 x float>* %a
  %ptrs = load <16 x float*>, <16 x float*>* %b
  %mask = fcmp oeq <16 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16f32(<16 x float> %vals, <16 x float*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32f32:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %ptrs = load <32 x float*>, <32 x float*>* %b
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

;
; ST1D (double)
;

; Scalarize 1 x double scatters
define void @masked_scatter_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v1f64:
  %vals = load <1 x double>, <1 x double>* %a
  %ptrs = load <1 x double*>, <1 x double*>* %b
  %mask = fcmp oeq <1 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v1f64(<1 x double> %vals, <1 x double*> %ptrs, i32 8, <1 x i1> %mask)
  ret void
}

define void @masked_scatter_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v2f64:
; CHECK: ldr q[[VALS:[0-9]+]], [x0]
; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0.0
; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <2 x double>, <2 x double>* %a
  %ptrs = load <2 x double*>, <2 x double*>* %b
  %mask = fcmp oeq <2 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v2f64(<2 x double> %vals, <2 x double*> %ptrs, i32 8, <2 x i1> %mask)
  ret void
}

define void @masked_scatter_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v4f64:
; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; CHECK-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; CHECK-NEXT: ret
  %vals = load <4 x double>, <4 x double>* %a
  %ptrs = load <4 x double*>, <4 x double*>* %b
  %mask = fcmp oeq <4 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v4f64(<4 x double> %vals, <4 x double*> %ptrs, i32 8, <4 x i1> %mask)
  ret void
}

define void @masked_scatter_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v8f64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: ret
  %vals = load <8 x double>, <8 x double>* %a
  %ptrs = load <8 x double*>, <8 x double*>* %b
  %mask = fcmp oeq <8 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8f64(<8 x double> %vals, <8 x double*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

define void @masked_scatter_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v16f64:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_1024-NEXT: ret
  %vals = load <16 x double>, <16 x double>* %a
  %ptrs = load <16 x double*>, <16 x double*>* %b
  %mask = fcmp oeq <16 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v16f64(<16 x double> %vals, <16 x double*> %ptrs, i32 8, <16 x i1> %mask)
  ret void
}

define void @masked_scatter_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
; CHECK-LABEL: masked_scatter_v32f64:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x double>, <32 x double>* %a
  %ptrs = load <32 x double*>, <32 x double*>* %b
  %mask = fcmp oeq <32 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f64(<32 x double> %vals, <32 x double*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

; The above tests cover the supported element types; the tests below check
; that the addressing modes still function.
define void @masked_scatter_32b_scaled_sext_f16(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_scaled_sext_f16:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, [[UPK]].s, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: st1h { [[UPKV]].s }, [[MASK]], [x2, [[PTRS]].s, sxtw #1]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x half>, <32 x half>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = sext <32 x i32> %idxs to <32 x i64>
  %ptrs = getelementptr half, half* %base, <32 x i64> %ext
  %mask = fcmp oeq <32 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_32b_scaled_sext_f32(<32 x float>* %a, <32 x i32>* %b, float* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_scaled_sext_f32:
; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].s, [[PG]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: st1w { [[VALS]].s }, [[MASK]], [x2, [[PTRS]].s, sxtw #2]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = sext <32 x i32> %idxs to <32 x i64>
  %ptrs = getelementptr float, float* %base, <32 x i64> %ext
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %b, double* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_scaled_sext_f64:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #3]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x double>, <32 x double>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = sext <32 x i32> %idxs to <32 x i64>
  %ptrs = getelementptr double, double* %base, <32 x i64> %ext
  %mask = fcmp oeq <32 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f64(<32 x double> %vals, <32 x double*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_scaled_zext:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, [[UPK]].s, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: st1h { [[UPKV]].s }, [[MASK]], [x2, [[PTRS]].s, uxtw #1]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x half>, <32 x half>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = zext <32 x i32> %idxs to <32 x i64>
  %ptrs = getelementptr half, half* %base, <32 x i64> %ext
  %mask = fcmp oeq <32 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_unscaled_sext:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, [[UPK]].s, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: st1h { [[UPKV]].s }, [[MASK]], [x2, [[PTRS]].s, sxtw]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x half>, <32 x half>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = sext <32 x i32> %idxs to <32 x i64>
  %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext
  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*>
  %mask = fcmp oeq <32 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
; CHECK-LABEL: masked_scatter_32b_unscaled_zext:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].s }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[MONE]].h
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].s, [[PG1]]/z, [[UPK]].s, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].s, [[VALS]].h
; VBITS_GE_2048-NEXT: st1h { [[UPKV]].s }, [[MASK]], [x2, [[PTRS]].s, uxtw]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x half>, <32 x half>* %a
  %idxs = load <32 x i32>, <32 x i32>* %b
  %ext = zext <32 x i32> %idxs to <32 x i64>
  %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %ext
  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x half*>
  %mask = fcmp oeq <32 x half> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f16(<32 x half> %vals, <32 x half*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %base) #0 {
; CHECK-LABEL: masked_scatter_64b_scaled:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #2]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %idxs = load <32 x i64>, <32 x i64>* %b
  %ptrs = getelementptr float, float* %base, <32 x i64> %idxs
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

define void @masked_scatter_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %base) #0 {
; CHECK-LABEL: masked_scatter_64b_unscaled:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], [x2, [[PTRS]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %idxs = load <32 x i64>, <32 x i64>* %b
  %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %idxs
  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*>
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
define void @masked_scatter_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %off) #0 {
; CHECK-LABEL: masked_scatter_vec_plus_reg:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, x2
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %bases = load <32 x i8*>, <32 x i8*>* %b
  %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 %off
  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*>
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
define void @masked_scatter_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 {
; CHECK-LABEL: masked_scatter_vec_plus_imm:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, #4
; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d]
; VBITS_GE_2048-NEXT: ret
  %vals = load <32 x float>, <32 x float>* %a
  %bases = load <32 x i8*>, <32 x i8*>* %b
  %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 4
  %ptrs = bitcast <32 x i8*> %byte_ptrs to <32 x float*>
  %mask = fcmp oeq <32 x float> %vals, zeroinitializer
  call void @llvm.masked.scatter.v32f32(<32 x float> %vals, <32 x float*> %ptrs, i32 8, <32 x i1> %mask)
  ret void
}

; extract_subvec(...(insert_subvec(a,b,c))) -> extract_subvec(bitcast(b),d) like
; combines can effectively unlegalise bitcast operations. This test ensures such
; combines do not happen after operation legalisation. When not prevented the
; test triggers infinite combine->legalise->combine->...

; NOTE: For this test to function correctly it's critical for %vals to be in a
; different block to the scatter store. If not, the problematic bitcast will be
; removed before operation legalisation and thus not exercise the combine.
define void @masked_scatter_bitcast_infinite_loop(<8 x double>* %a, <8 x double*>* %b, i1 %cond) #0 {
; CHECK-LABEL: masked_scatter_bitcast_infinite_loop:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
; VBITS_GE_512-NEXT: tbz w2, #0, [[LABEL:.*]]
; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
; VBITS_GE_512-NEXT: [[LABEL]]:
; VBITS_GE_512-NEXT: ret
  %vals = load volatile <8 x double>, <8 x double>* %a
  br i1 %cond, label %bb.1, label %bb.2

bb.1:
  %ptrs = load <8 x double*>, <8 x double*>* %b
  %mask = fcmp oeq <8 x double> %vals, zeroinitializer
  call void @llvm.masked.scatter.v8f64(<8 x double> %vals, <8 x double*> %ptrs, i32 8, <8 x i1> %mask)
  br label %bb.2

bb.2:
  ret void
}

declare void @llvm.masked.scatter.v2i8(<2 x i8>, <2 x i8*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32i8(<32 x i8>, <32 x i8*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v2i16(<2 x i16>, <2 x i16*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i16(<16 x i16>, <16 x i16*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32i16(<32 x i16>, <32 x i16*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v2i32(<2 x i32>, <2 x i32*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8i32(<8 x i32>, <8 x i32*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i32(<16 x i32>, <16 x i32*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32i32(<32 x i32>, <32 x i32*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v1i64(<1 x i64>, <1 x i64*>, i32, <1 x i1>)
declare void @llvm.masked.scatter.v2i64(<2 x i64>, <2 x i64*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4i64(<4 x i64>, <4 x i64*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8i64(<8 x i64>, <8 x i64*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16i64(<16 x i64>, <16 x i64*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32i64(<32 x i64>, <32 x i64*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v2f16(<2 x half>, <2 x half*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16f16(<16 x half>, <16 x half*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32f16(<32 x half>, <32 x half*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v2f32(<2 x float>, <2 x float*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8f32(<8 x float>, <8 x float*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16f32(<16 x float>, <16 x float*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32f32(<32 x float>, <32 x float*>, i32, <32 x i1>)

declare void @llvm.masked.scatter.v1f64(<1 x double>, <1 x double*>, i32, <1 x i1>)
declare void @llvm.masked.scatter.v2f64(<2 x double>, <2 x double*>, i32, <2 x i1>)
declare void @llvm.masked.scatter.v4f64(<4 x double>, <4 x double*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v8f64(<8 x double>, <8 x double*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v16f64(<16 x double>, <16 x double*>, i32, <16 x i1>)
declare void @llvm.masked.scatter.v32f64(<32 x double>, <32 x double*>, i32, <32 x i1>)

attributes #0 = { "target-features"="+sve" }