; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=dse -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

%struct.vec2 = type { <4 x i32>, <4 x i32> }
%struct.vec2plusi = type { <4 x i32>, <4 x i32>, i32 }

@glob1 = global %struct.vec2 zeroinitializer, align 16
@glob2 = global %struct.vec2plusi zeroinitializer, align 16

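; These tests check DSE's overwrite-of-end shortening: when a later store
; completely overwrites the trailing bytes of an earlier memset/memcpy, the
; earlier intrinsic's length can be reduced. The dontwrite* tests cover cases
; where no shortening is expected.
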
define void @write24to28(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
  call void @llvm.memset.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i1 false)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store i32 1, ptr %arrayidx1, align 4
  ret void
}

define void @write24to28_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store atomic i32 1, ptr %arrayidx1 unordered, align 4
  ret void
}

; Atomicity of the store is weaker than the memset
define void @write24to28_atomic_weaker(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28_atomic_weaker(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ARRAYIDX0:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[ARRAYIDX0]], i8 0, i64 24, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %arrayidx0 = getelementptr inbounds i32, ptr %p, i64 1
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %arrayidx0, i8 0, i64 28, i32 4)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store i32 1, ptr %arrayidx1, align 4
  ret void
}

define void @write28to32(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write28to32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 28, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.p0.i64(ptr align 4 %p, i8 0, i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store i32 1, ptr %arrayidx1, align 4
  ret void
}

define void @write28to32_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write28to32_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 [[P:%.*]], i8 0, i64 28, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 4 %p, i8 0, i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store atomic i32 1, ptr %arrayidx1 unordered, align 4
  ret void
}

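; In the two dontwrite*memset tests below the memset is 16-byte aligned and is
; expected to stay at its full 32-byte length even though the store overwrites
; its last 4 bytes.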
define void @dontwrite28to32memset(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[P:%.*]], i8 0, i64 32, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.p0.i64(ptr align 16 %p, i8 0, i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store i32 1, ptr %arrayidx1, align 4
  ret void
}

define void @dontwrite28to32memset_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memset_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 16 [[P:%.*]], i8 0, i64 32, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 7
; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 16 %p, i8 0, i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 7
  store atomic i32 1, ptr %arrayidx1 unordered, align 4
  ret void
}

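; The following tests apply the same end-overwrite shortening to memcpy from a
; global: the copy is expected to shrink from 36 to 32 bytes (write32to36*) and
; from 32 to 16 bytes (write16to32*), while dontwrite28to32memcpy* stays at 32.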
define void @write32to36(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i1 false)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
; CHECK-NEXT:    store i32 1, ptr [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i1 false)
  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
  store i32 1, ptr %c, align 4
  ret void
}

define void @write32to36_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
; CHECK-NEXT:    store atomic i32 1, ptr [[C]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i32 4)
  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
  store atomic i32 1, ptr %c unordered, align 4
  ret void
}

; Atomicity of the store is weaker than the memcpy
define void @write32to36_atomic_weaker(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write32to36_atomic_weaker(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob2, i64 32, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2PLUSI:%.*]], ptr [[P]], i64 0, i32 2
; CHECK-NEXT:    store i32 1, ptr [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob2, i64 36, i32 4)
  %c = getelementptr inbounds %struct.vec2plusi, ptr %p, i64 0, i32 2
  store i32 1, ptr %c, align 4
  ret void
}

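; A <4 x i32> store overwrites the entire second half of the copied struct, so
; the memcpy is expected to shrink from 32 to 16 bytes.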
define void @write16to32(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write16to32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 16, i1 false)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 1
; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i1 false)
  %c = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 1
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %c, align 4
  ret void
}

define void @write16to32_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write16to32_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 16, i32 4)
; CHECK-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 1
; CHECK-NEXT:    store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[C]], align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i32 4)
  %c = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 1
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr %c, align 4
  ret void
}

define void @dontwrite28to32memcpy(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memcpy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 32, i1 false)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 0, i64 7
; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i1 false)
  %arrayidx1 = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 0, i64 7
  store i32 1, ptr %arrayidx1, align 4
  ret void
}

define void @dontwrite28to32memcpy_atomic(ptr nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @dontwrite28to32memcpy_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 [[P:%.*]], ptr align 16 @glob1, i64 32, i32 4)
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_VEC2:%.*]], ptr [[P]], i64 0, i32 0, i64 7
; CHECK-NEXT:    store atomic i32 1, ptr [[ARRAYIDX1]] unordered, align 4
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 16 %p, ptr align 16 @glob1, i64 32, i32 4)
  %arrayidx1 = getelementptr inbounds %struct.vec2, ptr %p, i64 0, i32 0, i64 7
  store atomic i32 1, ptr %arrayidx1 unordered, align 4
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind

%struct.trapframe = type { i64, i64, i64 }

; bugzilla 11455 - make sure negative GEPs don't break this optimisation
define void @cpu_lwp_fork(ptr %md_regs, i64 %pcb_rsp0) nounwind uwtable noinline ssp {
; CHECK-LABEL: @cpu_lwp_fork(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[PCB_RSP0:%.*]] to ptr
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME:%.*]], ptr [[TMP0]], i64 -1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr [[ADD_PTR]], ptr [[MD_REGS:%.*]], i64 24, i1 false)
; CHECK-NEXT:    [[TF_TRAPNO:%.*]] = getelementptr inbounds [[STRUCT_TRAPFRAME]], ptr [[TMP0]], i64 -1, i32 1
; CHECK-NEXT:    store i64 3, ptr [[TF_TRAPNO]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = inttoptr i64 %pcb_rsp0 to ptr
  %add.ptr = getelementptr inbounds %struct.trapframe, ptr %0, i64 -1
  call void @llvm.memcpy.p0.p0.i64(ptr %add.ptr, ptr %md_regs, i64 24, i1 false)
  %tf_trapno = getelementptr inbounds %struct.trapframe, ptr %0, i64 -1, i32 1
  store i64 3, ptr %tf_trapno, align 8
  ret void
}

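; Two adjacent i64 stores together overwrite the last 16 bytes of the 32-byte
; memset, so the memset is expected to shrink to 16 bytes even though neither
; store covers that range on its own.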
define void @write16To23AndThen24To31(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memset.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
; CHECK-NEXT:    store i64 3, ptr [[BASE64_2]]
; CHECK-NEXT:    store i64 3, ptr [[BASE64_3]]
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memset.p0.i64(ptr align 8 %P, i8 0, i64 32, i1 false)
  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
  store i64 3, ptr %base64_2
  store i64 3, ptr %base64_3
  ret void
}

define void @write16To23AndThen24To31_atomic(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_2]] unordered, align 8
; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_3]] unordered, align 8
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
  store atomic i64 3, ptr %base64_2 unordered, align 8
  store atomic i64 3, ptr %base64_3 unordered, align 8
  ret void
}

define void @write16To23AndThen24To31_atomic_weaker1(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
; CHECK-NEXT:    store i64 3, ptr [[BASE64_2]], align 8
; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_3]] unordered, align 8
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
  store i64 3, ptr %base64_2, align 8
  store atomic i64 3, ptr %base64_3 unordered, align 8
  ret void
}

define void @write16To23AndThen24To31_atomic_weaker2(ptr nocapture %P, i64 %n64, i32 %n32, i16 %n16, i8 %n8) {
; CHECK-LABEL: @write16To23AndThen24To31_atomic_weaker2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 [[P:%.*]], i8 0, i64 16, i32 8)
; CHECK-NEXT:    [[BASE64_2:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 2
; CHECK-NEXT:    [[BASE64_3:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 3
; CHECK-NEXT:    store atomic i64 3, ptr [[BASE64_2]] unordered, align 8
; CHECK-NEXT:    store i64 3, ptr [[BASE64_3]], align 8
; CHECK-NEXT:    ret void
;
entry:
  tail call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 8 %P, i8 0, i64 32, i32 8)
  %base64_2 = getelementptr inbounds i64, ptr %P, i64 2
  %base64_3 = getelementptr inbounds i64, ptr %P, i64 3
  store atomic i64 3, ptr %base64_2 unordered, align 8
  store i64 3, ptr %base64_3, align 8
  ret void
}

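; In the three ow_end_align* tests, how far the memset is shortened depends on
; its alignment: with align 1 it shrinks to 27 bytes, with align 4 to 28 bytes,
; and with align 8 it is left at its full 32 bytes.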
define void @ow_end_align1(ptr nocapture %p) {
; CHECK-LABEL: @ow_end_align1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[P1]], i8 0, i64 27, i1 false)
; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %p1 = getelementptr inbounds i8, ptr %p, i64 1
  call void @llvm.memset.p0.i64(ptr align 1 %p1, i8 0, i64 32, i1 false)
  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
  store i64 1, ptr %p2, align 1
  ret void
}

define void @ow_end_align4(ptr nocapture %p) {
; CHECK-LABEL: @ow_end_align4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[P1]], i8 0, i64 28, i1 false)
; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %p1 = getelementptr inbounds i8, ptr %p, i64 1
  call void @llvm.memset.p0.i64(ptr align 4 %p1, i8 0, i64 32, i1 false)
  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
  store i64 1, ptr %p2, align 1
  ret void
}

define void @ow_end_align8(ptr nocapture %p) {
; CHECK-LABEL: @ow_end_align8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 1
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[P1]], i8 0, i64 32, i1 false)
; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 27
; CHECK-NEXT:    store i64 1, ptr [[P2]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %p1 = getelementptr inbounds i8, ptr %p, i64 1
  call void @llvm.memset.p0.i64(ptr align 8 %p1, i8 0, i64 32, i1 false)
  %p2 = getelementptr inbounds i8, ptr %p1, i64 27
  store i64 1, ptr %p2, align 1
  ret void
}