1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
3 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SLM
4 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
5 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
6 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F
7 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512BW
8 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -basicaa -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX256BW
; Test fixtures: for each element width N there are two 512-bit source arrays
; (@aN, @bN) and one destination array (@cN). All are 64-byte aligned so that
; alignment never constrains the vector load/store width the SLP vectorizer
; could choose.
10 @a64 = common global [8 x i64] zeroinitializer, align 64
11 @b64 = common global [8 x i64] zeroinitializer, align 64
12 @c64 = common global [8 x i64] zeroinitializer, align 64
13 @a32 = common global [16 x i32] zeroinitializer, align 64
14 @b32 = common global [16 x i32] zeroinitializer, align 64
15 @c32 = common global [16 x i32] zeroinitializer, align 64
16 @a16 = common global [32 x i16] zeroinitializer, align 64
17 @b16 = common global [32 x i16] zeroinitializer, align 64
18 @c16 = common global [32 x i16] zeroinitializer, align 64
19 @a8 = common global [64 x i8] zeroinitializer, align 64
20 @b8 = common global [64 x i8] zeroinitializer, align 64
21 @c8 = common global [64 x i8] zeroinitializer, align 64
; Signed add-with-overflow intrinsics under test, one per element width.
; Each returns the {result, overflow-flag} pair; the functions below only
; consume field 0 (the sum).
23 declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64)
24 declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
25 declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16)
26 declare {i8 , i1} @llvm.sadd.with.overflow.i8 (i8 , i8 )
; add_v8i64: feeds 8 consecutive i64 lanes of @a64/@b64 through
; llvm.sadd.with.overflow.i64 and stores the extracted sum lanes to @c64.
; The autogenerated checks below expect fully scalar output under every RUN
; configuration (shared CHECK prefix), i.e. the SLP vectorizer currently does
; not vectorize this overflow-intrinsic pattern at any tested subtarget.
28 define void @add_v8i64() {
29 ; CHECK-LABEL: @add_v8i64(
30 ; CHECK-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
31 ; CHECK-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
32 ; CHECK-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
33 ; CHECK-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
34 ; CHECK-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
35 ; CHECK-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
36 ; CHECK-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
37 ; CHECK-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
38 ; CHECK-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
39 ; CHECK-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
40 ; CHECK-NEXT: [[B2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
41 ; CHECK-NEXT: [[B3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
42 ; CHECK-NEXT: [[B4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
43 ; CHECK-NEXT: [[B5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
44 ; CHECK-NEXT: [[B6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
45 ; CHECK-NEXT: [[B7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
46 ; CHECK-NEXT: [[C0:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A0]], i64 [[B0]])
47 ; CHECK-NEXT: [[C1:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A1]], i64 [[B1]])
48 ; CHECK-NEXT: [[C2:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A2]], i64 [[B2]])
49 ; CHECK-NEXT: [[C3:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A3]], i64 [[B3]])
50 ; CHECK-NEXT: [[C4:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A4]], i64 [[B4]])
51 ; CHECK-NEXT: [[C5:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A5]], i64 [[B5]])
52 ; CHECK-NEXT: [[C6:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A6]], i64 [[B6]])
53 ; CHECK-NEXT: [[C7:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A7]], i64 [[B7]])
54 ; CHECK-NEXT: [[R0:%.*]] = extractvalue { i64, i1 } [[C0]], 0
55 ; CHECK-NEXT: [[R1:%.*]] = extractvalue { i64, i1 } [[C1]], 0
56 ; CHECK-NEXT: [[R2:%.*]] = extractvalue { i64, i1 } [[C2]], 0
57 ; CHECK-NEXT: [[R3:%.*]] = extractvalue { i64, i1 } [[C3]], 0
58 ; CHECK-NEXT: [[R4:%.*]] = extractvalue { i64, i1 } [[C4]], 0
59 ; CHECK-NEXT: [[R5:%.*]] = extractvalue { i64, i1 } [[C5]], 0
60 ; CHECK-NEXT: [[R6:%.*]] = extractvalue { i64, i1 } [[C6]], 0
61 ; CHECK-NEXT: [[R7:%.*]] = extractvalue { i64, i1 } [[C7]], 0
62 ; CHECK-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
63 ; CHECK-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
64 ; CHECK-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
65 ; CHECK-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
66 ; CHECK-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
67 ; CHECK-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
68 ; CHECK-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
69 ; CHECK-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
70 ; CHECK-NEXT: ret void
; Scalar input IR from which the checks above were regenerated:
; 8 lane loads of each operand, 8 intrinsic calls, 8 field-0 extracts, 8 stores.
72 %a0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 0), align 8
73 %a1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 1), align 8
74 %a2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 2), align 8
75 %a3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 3), align 8
76 %a4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 4), align 8
77 %a5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 5), align 8
78 %a6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 6), align 8
79 %a7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 0, i64 7), align 8
80 %b0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 0), align 8
81 %b1 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 1), align 8
82 %b2 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 2), align 8
83 %b3 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 3), align 8
84 %b4 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 4), align 8
85 %b5 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 5), align 8
86 %b6 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 6), align 8
87 %b7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 0, i64 7), align 8
88 %c0 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a0, i64 %b0)
89 %c1 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a1, i64 %b1)
90 %c2 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a2, i64 %b2)
91 %c3 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a3, i64 %b3)
92 %c4 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a4, i64 %b4)
93 %c5 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a5, i64 %b5)
94 %c6 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a6, i64 %b6)
95 %c7 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a7, i64 %b7)
96 %r0 = extractvalue {i64, i1} %c0, 0
97 %r1 = extractvalue {i64, i1} %c1, 0
98 %r2 = extractvalue {i64, i1} %c2, 0
99 %r3 = extractvalue {i64, i1} %c3, 0
100 %r4 = extractvalue {i64, i1} %c4, 0
101 %r5 = extractvalue {i64, i1} %c5, 0
102 %r6 = extractvalue {i64, i1} %c6, 0
103 %r7 = extractvalue {i64, i1} %c7, 0
104 store i64 %r0, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 0), align 8
105 store i64 %r1, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 1), align 8
106 store i64 %r2, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2), align 8
107 store i64 %r3, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 3), align 8
108 store i64 %r4, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4), align 8
109 store i64 %r5, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 5), align 8
110 store i64 %r6, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6), align 8
111 store i64 %r7, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 7), align 8
; add_v16i32: same pattern as @add_v8i64 at i32 width — 16 lanes of @a32/@b32
; through llvm.sadd.with.overflow.i32, sum lanes stored to @c32.
; Checks expect fully scalar output under every RUN configuration (shared
; CHECK prefix): the SLP vectorizer does not vectorize this pattern here either.
115 define void @add_v16i32() {
116 ; CHECK-LABEL: @add_v16i32(
117 ; CHECK-NEXT: [[A0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0), align 4
118 ; CHECK-NEXT: [[A1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1), align 4
119 ; CHECK-NEXT: [[A2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2), align 4
120 ; CHECK-NEXT: [[A3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3), align 4
121 ; CHECK-NEXT: [[A4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4), align 4
122 ; CHECK-NEXT: [[A5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5), align 4
123 ; CHECK-NEXT: [[A6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6), align 4
124 ; CHECK-NEXT: [[A7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7), align 4
125 ; CHECK-NEXT: [[A8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8), align 4
126 ; CHECK-NEXT: [[A9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9), align 4
127 ; CHECK-NEXT: [[A10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
128 ; CHECK-NEXT: [[A11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
129 ; CHECK-NEXT: [[A12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
130 ; CHECK-NEXT: [[A13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
131 ; CHECK-NEXT: [[A14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
132 ; CHECK-NEXT: [[A15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
133 ; CHECK-NEXT: [[B0:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0), align 4
134 ; CHECK-NEXT: [[B1:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1), align 4
135 ; CHECK-NEXT: [[B2:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2), align 4
136 ; CHECK-NEXT: [[B3:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3), align 4
137 ; CHECK-NEXT: [[B4:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4), align 4
138 ; CHECK-NEXT: [[B5:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5), align 4
139 ; CHECK-NEXT: [[B6:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6), align 4
140 ; CHECK-NEXT: [[B7:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7), align 4
141 ; CHECK-NEXT: [[B8:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8), align 4
142 ; CHECK-NEXT: [[B9:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9), align 4
143 ; CHECK-NEXT: [[B10:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
144 ; CHECK-NEXT: [[B11:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
145 ; CHECK-NEXT: [[B12:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
146 ; CHECK-NEXT: [[B13:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
147 ; CHECK-NEXT: [[B14:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
148 ; CHECK-NEXT: [[B15:%.*]] = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
149 ; CHECK-NEXT: [[C0:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A0]], i32 [[B0]])
150 ; CHECK-NEXT: [[C1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A1]], i32 [[B1]])
151 ; CHECK-NEXT: [[C2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A2]], i32 [[B2]])
152 ; CHECK-NEXT: [[C3:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A3]], i32 [[B3]])
153 ; CHECK-NEXT: [[C4:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A4]], i32 [[B4]])
154 ; CHECK-NEXT: [[C5:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A5]], i32 [[B5]])
155 ; CHECK-NEXT: [[C6:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A6]], i32 [[B6]])
156 ; CHECK-NEXT: [[C7:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A7]], i32 [[B7]])
157 ; CHECK-NEXT: [[C8:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A8]], i32 [[B8]])
158 ; CHECK-NEXT: [[C9:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A9]], i32 [[B9]])
159 ; CHECK-NEXT: [[C10:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A10]], i32 [[B10]])
160 ; CHECK-NEXT: [[C11:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A11]], i32 [[B11]])
161 ; CHECK-NEXT: [[C12:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A12]], i32 [[B12]])
162 ; CHECK-NEXT: [[C13:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A13]], i32 [[B13]])
163 ; CHECK-NEXT: [[C14:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A14]], i32 [[B14]])
164 ; CHECK-NEXT: [[C15:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A15]], i32 [[B15]])
165 ; CHECK-NEXT: [[R0:%.*]] = extractvalue { i32, i1 } [[C0]], 0
166 ; CHECK-NEXT: [[R1:%.*]] = extractvalue { i32, i1 } [[C1]], 0
167 ; CHECK-NEXT: [[R2:%.*]] = extractvalue { i32, i1 } [[C2]], 0
168 ; CHECK-NEXT: [[R3:%.*]] = extractvalue { i32, i1 } [[C3]], 0
169 ; CHECK-NEXT: [[R4:%.*]] = extractvalue { i32, i1 } [[C4]], 0
170 ; CHECK-NEXT: [[R5:%.*]] = extractvalue { i32, i1 } [[C5]], 0
171 ; CHECK-NEXT: [[R6:%.*]] = extractvalue { i32, i1 } [[C6]], 0
172 ; CHECK-NEXT: [[R7:%.*]] = extractvalue { i32, i1 } [[C7]], 0
173 ; CHECK-NEXT: [[R8:%.*]] = extractvalue { i32, i1 } [[C8]], 0
174 ; CHECK-NEXT: [[R9:%.*]] = extractvalue { i32, i1 } [[C9]], 0
175 ; CHECK-NEXT: [[R10:%.*]] = extractvalue { i32, i1 } [[C10]], 0
176 ; CHECK-NEXT: [[R11:%.*]] = extractvalue { i32, i1 } [[C11]], 0
177 ; CHECK-NEXT: [[R12:%.*]] = extractvalue { i32, i1 } [[C12]], 0
178 ; CHECK-NEXT: [[R13:%.*]] = extractvalue { i32, i1 } [[C13]], 0
179 ; CHECK-NEXT: [[R14:%.*]] = extractvalue { i32, i1 } [[C14]], 0
180 ; CHECK-NEXT: [[R15:%.*]] = extractvalue { i32, i1 } [[C15]], 0
181 ; CHECK-NEXT: store i32 [[R0]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0), align 4
182 ; CHECK-NEXT: store i32 [[R1]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1), align 4
183 ; CHECK-NEXT: store i32 [[R2]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2), align 4
184 ; CHECK-NEXT: store i32 [[R3]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3), align 4
185 ; CHECK-NEXT: store i32 [[R4]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4), align 4
186 ; CHECK-NEXT: store i32 [[R5]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5), align 4
187 ; CHECK-NEXT: store i32 [[R6]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6), align 4
188 ; CHECK-NEXT: store i32 [[R7]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7), align 4
189 ; CHECK-NEXT: store i32 [[R8]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8), align 4
190 ; CHECK-NEXT: store i32 [[R9]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9), align 4
191 ; CHECK-NEXT: store i32 [[R10]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
192 ; CHECK-NEXT: store i32 [[R11]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
193 ; CHECK-NEXT: store i32 [[R12]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
194 ; CHECK-NEXT: store i32 [[R13]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
195 ; CHECK-NEXT: store i32 [[R14]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
196 ; CHECK-NEXT: store i32 [[R15]], i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
197 ; CHECK-NEXT: ret void
; Scalar input IR from which the checks above were regenerated:
; 16 lane loads of each operand, 16 intrinsic calls, 16 field-0 extracts, 16 stores.
199 %a0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 0 ), align 4
200 %a1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 1 ), align 4
201 %a2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 2 ), align 4
202 %a3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 3 ), align 4
203 %a4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 4 ), align 4
204 %a5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 5 ), align 4
205 %a6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 6 ), align 4
206 %a7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 7 ), align 4
207 %a8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 8 ), align 4
208 %a9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 9 ), align 4
209 %a10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 10), align 4
210 %a11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 11), align 4
211 %a12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 12), align 4
212 %a13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 13), align 4
213 %a14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 14), align 4
214 %a15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @a32, i32 0, i64 15), align 4
215 %b0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 0 ), align 4
216 %b1 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 1 ), align 4
217 %b2 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 2 ), align 4
218 %b3 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 3 ), align 4
219 %b4 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 4 ), align 4
220 %b5 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 5 ), align 4
221 %b6 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 6 ), align 4
222 %b7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 7 ), align 4
223 %b8 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 8 ), align 4
224 %b9 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 9 ), align 4
225 %b10 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 10), align 4
226 %b11 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 11), align 4
227 %b12 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 12), align 4
228 %b13 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 13), align 4
229 %b14 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 14), align 4
230 %b15 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @b32, i32 0, i64 15), align 4
231 %c0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0 , i32 %b0 )
232 %c1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a1 , i32 %b1 )
233 %c2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a2 , i32 %b2 )
234 %c3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a3 , i32 %b3 )
235 %c4 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a4 , i32 %b4 )
236 %c5 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a5 , i32 %b5 )
237 %c6 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a6 , i32 %b6 )
238 %c7 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a7 , i32 %b7 )
239 %c8 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a8 , i32 %b8 )
240 %c9 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a9 , i32 %b9 )
241 %c10 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a10, i32 %b10)
242 %c11 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a11, i32 %b11)
243 %c12 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a12, i32 %b12)
244 %c13 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a13, i32 %b13)
245 %c14 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a14, i32 %b14)
246 %c15 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a15, i32 %b15)
247 %r0 = extractvalue {i32, i1} %c0 , 0
248 %r1 = extractvalue {i32, i1} %c1 , 0
249 %r2 = extractvalue {i32, i1} %c2 , 0
250 %r3 = extractvalue {i32, i1} %c3 , 0
251 %r4 = extractvalue {i32, i1} %c4 , 0
252 %r5 = extractvalue {i32, i1} %c5 , 0
253 %r6 = extractvalue {i32, i1} %c6 , 0
254 %r7 = extractvalue {i32, i1} %c7 , 0
255 %r8 = extractvalue {i32, i1} %c8 , 0
256 %r9 = extractvalue {i32, i1} %c9 , 0
257 %r10 = extractvalue {i32, i1} %c10, 0
258 %r11 = extractvalue {i32, i1} %c11, 0
259 %r12 = extractvalue {i32, i1} %c12, 0
260 %r13 = extractvalue {i32, i1} %c13, 0
261 %r14 = extractvalue {i32, i1} %c14, 0
262 %r15 = extractvalue {i32, i1} %c15, 0
263 store i32 %r0 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 0 ), align 4
264 store i32 %r1 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 1 ), align 4
265 store i32 %r2 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 2 ), align 4
266 store i32 %r3 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 3 ), align 4
267 store i32 %r4 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 4 ), align 4
268 store i32 %r5 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 5 ), align 4
269 store i32 %r6 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 6 ), align 4
270 store i32 %r7 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 7 ), align 4
271 store i32 %r8 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 8 ), align 4
272 store i32 %r9 , i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 9 ), align 4
273 store i32 %r10, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 10), align 4
274 store i32 %r11, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 11), align 4
275 store i32 %r12, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 12), align 4
276 store i32 %r13, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 13), align 4
277 store i32 %r14, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 14), align 4
278 store i32 %r15, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @c32, i32 0, i64 15), align 4
282 define void @add_v32i16() {
283 ; CHECK-LABEL: @add_v32i16(
284 ; CHECK-NEXT: [[A0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0), align 2
285 ; CHECK-NEXT: [[A1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1), align 2
286 ; CHECK-NEXT: [[A2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2), align 2
287 ; CHECK-NEXT: [[A3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3), align 2
288 ; CHECK-NEXT: [[A4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4), align 2
289 ; CHECK-NEXT: [[A5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5), align 2
290 ; CHECK-NEXT: [[A6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6), align 2
291 ; CHECK-NEXT: [[A7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7), align 2
292 ; CHECK-NEXT: [[A8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8), align 2
293 ; CHECK-NEXT: [[A9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9), align 2
294 ; CHECK-NEXT: [[A10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
295 ; CHECK-NEXT: [[A11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
296 ; CHECK-NEXT: [[A12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
297 ; CHECK-NEXT: [[A13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
298 ; CHECK-NEXT: [[A14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
299 ; CHECK-NEXT: [[A15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
300 ; CHECK-NEXT: [[A16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
301 ; CHECK-NEXT: [[A17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
302 ; CHECK-NEXT: [[A18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
303 ; CHECK-NEXT: [[A19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
304 ; CHECK-NEXT: [[A20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
305 ; CHECK-NEXT: [[A21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
306 ; CHECK-NEXT: [[A22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
307 ; CHECK-NEXT: [[A23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
308 ; CHECK-NEXT: [[A24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
309 ; CHECK-NEXT: [[A25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
310 ; CHECK-NEXT: [[A26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
311 ; CHECK-NEXT: [[A27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
312 ; CHECK-NEXT: [[A28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
313 ; CHECK-NEXT: [[A29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
314 ; CHECK-NEXT: [[A30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
315 ; CHECK-NEXT: [[A31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
316 ; CHECK-NEXT: [[B0:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0), align 2
317 ; CHECK-NEXT: [[B1:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1), align 2
318 ; CHECK-NEXT: [[B2:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2), align 2
319 ; CHECK-NEXT: [[B3:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3), align 2
320 ; CHECK-NEXT: [[B4:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4), align 2
321 ; CHECK-NEXT: [[B5:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5), align 2
322 ; CHECK-NEXT: [[B6:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6), align 2
323 ; CHECK-NEXT: [[B7:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7), align 2
324 ; CHECK-NEXT: [[B8:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8), align 2
325 ; CHECK-NEXT: [[B9:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9), align 2
326 ; CHECK-NEXT: [[B10:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
327 ; CHECK-NEXT: [[B11:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
328 ; CHECK-NEXT: [[B12:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
329 ; CHECK-NEXT: [[B13:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
330 ; CHECK-NEXT: [[B14:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
331 ; CHECK-NEXT: [[B15:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
332 ; CHECK-NEXT: [[B16:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
333 ; CHECK-NEXT: [[B17:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
334 ; CHECK-NEXT: [[B18:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
335 ; CHECK-NEXT: [[B19:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
336 ; CHECK-NEXT: [[B20:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
337 ; CHECK-NEXT: [[B21:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
338 ; CHECK-NEXT: [[B22:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
339 ; CHECK-NEXT: [[B23:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
340 ; CHECK-NEXT: [[B24:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
341 ; CHECK-NEXT: [[B25:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
342 ; CHECK-NEXT: [[B26:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
343 ; CHECK-NEXT: [[B27:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
344 ; CHECK-NEXT: [[B28:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
345 ; CHECK-NEXT: [[B29:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
346 ; CHECK-NEXT: [[B30:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
347 ; CHECK-NEXT: [[B31:%.*]] = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
348 ; CHECK-NEXT: [[C0:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A0]], i16 [[B0]])
349 ; CHECK-NEXT: [[C1:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A1]], i16 [[B1]])
350 ; CHECK-NEXT: [[C2:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A2]], i16 [[B2]])
351 ; CHECK-NEXT: [[C3:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A3]], i16 [[B3]])
352 ; CHECK-NEXT: [[C4:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A4]], i16 [[B4]])
353 ; CHECK-NEXT: [[C5:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A5]], i16 [[B5]])
354 ; CHECK-NEXT: [[C6:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A6]], i16 [[B6]])
355 ; CHECK-NEXT: [[C7:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A7]], i16 [[B7]])
356 ; CHECK-NEXT: [[C8:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A8]], i16 [[B8]])
357 ; CHECK-NEXT: [[C9:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A9]], i16 [[B9]])
358 ; CHECK-NEXT: [[C10:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A10]], i16 [[B10]])
359 ; CHECK-NEXT: [[C11:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A11]], i16 [[B11]])
360 ; CHECK-NEXT: [[C12:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A12]], i16 [[B12]])
361 ; CHECK-NEXT: [[C13:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A13]], i16 [[B13]])
362 ; CHECK-NEXT: [[C14:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A14]], i16 [[B14]])
363 ; CHECK-NEXT: [[C15:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A15]], i16 [[B15]])
364 ; CHECK-NEXT: [[C16:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A16]], i16 [[B16]])
365 ; CHECK-NEXT: [[C17:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A17]], i16 [[B17]])
366 ; CHECK-NEXT: [[C18:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A18]], i16 [[B18]])
367 ; CHECK-NEXT: [[C19:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A19]], i16 [[B19]])
368 ; CHECK-NEXT: [[C20:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A20]], i16 [[B20]])
369 ; CHECK-NEXT: [[C21:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A21]], i16 [[B21]])
370 ; CHECK-NEXT: [[C22:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A22]], i16 [[B22]])
371 ; CHECK-NEXT: [[C23:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A23]], i16 [[B23]])
372 ; CHECK-NEXT: [[C24:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A24]], i16 [[B24]])
373 ; CHECK-NEXT: [[C25:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A25]], i16 [[B25]])
374 ; CHECK-NEXT: [[C26:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A26]], i16 [[B26]])
375 ; CHECK-NEXT: [[C27:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A27]], i16 [[B27]])
376 ; CHECK-NEXT: [[C28:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A28]], i16 [[B28]])
377 ; CHECK-NEXT: [[C29:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A29]], i16 [[B29]])
378 ; CHECK-NEXT: [[C30:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A30]], i16 [[B30]])
379 ; CHECK-NEXT: [[C31:%.*]] = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 [[A31]], i16 [[B31]])
380 ; CHECK-NEXT: [[R0:%.*]] = extractvalue { i16, i1 } [[C0]], 0
381 ; CHECK-NEXT: [[R1:%.*]] = extractvalue { i16, i1 } [[C1]], 0
382 ; CHECK-NEXT: [[R2:%.*]] = extractvalue { i16, i1 } [[C2]], 0
383 ; CHECK-NEXT: [[R3:%.*]] = extractvalue { i16, i1 } [[C3]], 0
384 ; CHECK-NEXT: [[R4:%.*]] = extractvalue { i16, i1 } [[C4]], 0
385 ; CHECK-NEXT: [[R5:%.*]] = extractvalue { i16, i1 } [[C5]], 0
386 ; CHECK-NEXT: [[R6:%.*]] = extractvalue { i16, i1 } [[C6]], 0
387 ; CHECK-NEXT: [[R7:%.*]] = extractvalue { i16, i1 } [[C7]], 0
388 ; CHECK-NEXT: [[R8:%.*]] = extractvalue { i16, i1 } [[C8]], 0
389 ; CHECK-NEXT: [[R9:%.*]] = extractvalue { i16, i1 } [[C9]], 0
390 ; CHECK-NEXT: [[R10:%.*]] = extractvalue { i16, i1 } [[C10]], 0
391 ; CHECK-NEXT: [[R11:%.*]] = extractvalue { i16, i1 } [[C11]], 0
392 ; CHECK-NEXT: [[R12:%.*]] = extractvalue { i16, i1 } [[C12]], 0
393 ; CHECK-NEXT: [[R13:%.*]] = extractvalue { i16, i1 } [[C13]], 0
394 ; CHECK-NEXT: [[R14:%.*]] = extractvalue { i16, i1 } [[C14]], 0
395 ; CHECK-NEXT: [[R15:%.*]] = extractvalue { i16, i1 } [[C15]], 0
396 ; CHECK-NEXT: [[R16:%.*]] = extractvalue { i16, i1 } [[C16]], 0
397 ; CHECK-NEXT: [[R17:%.*]] = extractvalue { i16, i1 } [[C17]], 0
398 ; CHECK-NEXT: [[R18:%.*]] = extractvalue { i16, i1 } [[C18]], 0
399 ; CHECK-NEXT: [[R19:%.*]] = extractvalue { i16, i1 } [[C19]], 0
400 ; CHECK-NEXT: [[R20:%.*]] = extractvalue { i16, i1 } [[C20]], 0
401 ; CHECK-NEXT: [[R21:%.*]] = extractvalue { i16, i1 } [[C21]], 0
402 ; CHECK-NEXT: [[R22:%.*]] = extractvalue { i16, i1 } [[C22]], 0
403 ; CHECK-NEXT: [[R23:%.*]] = extractvalue { i16, i1 } [[C23]], 0
404 ; CHECK-NEXT: [[R24:%.*]] = extractvalue { i16, i1 } [[C24]], 0
405 ; CHECK-NEXT: [[R25:%.*]] = extractvalue { i16, i1 } [[C25]], 0
406 ; CHECK-NEXT: [[R26:%.*]] = extractvalue { i16, i1 } [[C26]], 0
407 ; CHECK-NEXT: [[R27:%.*]] = extractvalue { i16, i1 } [[C27]], 0
408 ; CHECK-NEXT: [[R28:%.*]] = extractvalue { i16, i1 } [[C28]], 0
409 ; CHECK-NEXT: [[R29:%.*]] = extractvalue { i16, i1 } [[C29]], 0
410 ; CHECK-NEXT: [[R30:%.*]] = extractvalue { i16, i1 } [[C30]], 0
411 ; CHECK-NEXT: [[R31:%.*]] = extractvalue { i16, i1 } [[C31]], 0
412 ; CHECK-NEXT: store i16 [[R0]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0), align 2
413 ; CHECK-NEXT: store i16 [[R1]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1), align 2
414 ; CHECK-NEXT: store i16 [[R2]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2), align 2
415 ; CHECK-NEXT: store i16 [[R3]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3), align 2
416 ; CHECK-NEXT: store i16 [[R4]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4), align 2
417 ; CHECK-NEXT: store i16 [[R5]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5), align 2
418 ; CHECK-NEXT: store i16 [[R6]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6), align 2
419 ; CHECK-NEXT: store i16 [[R7]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7), align 2
420 ; CHECK-NEXT: store i16 [[R8]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8), align 2
421 ; CHECK-NEXT: store i16 [[R9]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9), align 2
422 ; CHECK-NEXT: store i16 [[R10]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
423 ; CHECK-NEXT: store i16 [[R11]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
424 ; CHECK-NEXT: store i16 [[R12]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
425 ; CHECK-NEXT: store i16 [[R13]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
426 ; CHECK-NEXT: store i16 [[R14]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
427 ; CHECK-NEXT: store i16 [[R15]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
428 ; CHECK-NEXT: store i16 [[R16]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
429 ; CHECK-NEXT: store i16 [[R17]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
430 ; CHECK-NEXT: store i16 [[R18]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
431 ; CHECK-NEXT: store i16 [[R19]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
432 ; CHECK-NEXT: store i16 [[R20]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
433 ; CHECK-NEXT: store i16 [[R21]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
434 ; CHECK-NEXT: store i16 [[R22]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
435 ; CHECK-NEXT: store i16 [[R23]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
436 ; CHECK-NEXT: store i16 [[R24]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
437 ; CHECK-NEXT: store i16 [[R25]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
438 ; CHECK-NEXT: store i16 [[R26]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
439 ; CHECK-NEXT: store i16 [[R27]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
440 ; CHECK-NEXT: store i16 [[R28]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
441 ; CHECK-NEXT: store i16 [[R29]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
442 ; CHECK-NEXT: store i16 [[R30]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
443 ; CHECK-NEXT: store i16 [[R31]], i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
444 ; CHECK-NEXT: ret void
446 %a0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 0 ), align 2
447 %a1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 1 ), align 2
448 %a2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 2 ), align 2
449 %a3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 3 ), align 2
450 %a4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 4 ), align 2
451 %a5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 5 ), align 2
452 %a6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 6 ), align 2
453 %a7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 7 ), align 2
454 %a8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 8 ), align 2
455 %a9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 9 ), align 2
456 %a10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 10), align 2
457 %a11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 11), align 2
458 %a12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 12), align 2
459 %a13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 13), align 2
460 %a14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 14), align 2
461 %a15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 15), align 2
462 %a16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 16), align 2
463 %a17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 17), align 2
464 %a18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 18), align 2
465 %a19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 19), align 2
466 %a20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 20), align 2
467 %a21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 21), align 2
468 %a22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 22), align 2
469 %a23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 23), align 2
470 %a24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 24), align 2
471 %a25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 25), align 2
472 %a26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 26), align 2
473 %a27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 27), align 2
474 %a28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 28), align 2
475 %a29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 29), align 2
476 %a30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 30), align 2
477 %a31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @a16, i32 0, i64 31), align 2
478 %b0 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 0 ), align 2
479 %b1 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 1 ), align 2
480 %b2 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 2 ), align 2
481 %b3 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 3 ), align 2
482 %b4 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 4 ), align 2
483 %b5 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 5 ), align 2
484 %b6 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 6 ), align 2
485 %b7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 7 ), align 2
486 %b8 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 8 ), align 2
487 %b9 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 9 ), align 2
488 %b10 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 10), align 2
489 %b11 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 11), align 2
490 %b12 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 12), align 2
491 %b13 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 13), align 2
492 %b14 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 14), align 2
493 %b15 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 15), align 2
494 %b16 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 16), align 2
495 %b17 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 17), align 2
496 %b18 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 18), align 2
497 %b19 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 19), align 2
498 %b20 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 20), align 2
499 %b21 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 21), align 2
500 %b22 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 22), align 2
501 %b23 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 23), align 2
502 %b24 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 24), align 2
503 %b25 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 25), align 2
504 %b26 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 26), align 2
505 %b27 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 27), align 2
506 %b28 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 28), align 2
507 %b29 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 29), align 2
508 %b30 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 30), align 2
509 %b31 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @b16, i32 0, i64 31), align 2
510 %c0 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a0 , i16 %b0 )
511 %c1 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a1 , i16 %b1 )
512 %c2 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a2 , i16 %b2 )
513 %c3 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a3 , i16 %b3 )
514 %c4 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a4 , i16 %b4 )
515 %c5 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a5 , i16 %b5 )
516 %c6 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a6 , i16 %b6 )
517 %c7 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a7 , i16 %b7 )
518 %c8 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a8 , i16 %b8 )
519 %c9 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a9 , i16 %b9 )
520 %c10 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a10, i16 %b10)
521 %c11 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a11, i16 %b11)
522 %c12 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a12, i16 %b12)
523 %c13 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a13, i16 %b13)
524 %c14 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a14, i16 %b14)
525 %c15 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a15, i16 %b15)
526 %c16 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a16, i16 %b16)
527 %c17 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a17, i16 %b17)
528 %c18 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a18, i16 %b18)
529 %c19 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a19, i16 %b19)
530 %c20 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a20, i16 %b20)
531 %c21 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a21, i16 %b21)
532 %c22 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a22, i16 %b22)
533 %c23 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a23, i16 %b23)
534 %c24 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a24, i16 %b24)
535 %c25 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a25, i16 %b25)
536 %c26 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a26, i16 %b26)
537 %c27 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a27, i16 %b27)
538 %c28 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a28, i16 %b28)
539 %c29 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a29, i16 %b29)
540 %c30 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a30, i16 %b30)
541 %c31 = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a31, i16 %b31)
542 %r0 = extractvalue {i16, i1} %c0 , 0
543 %r1 = extractvalue {i16, i1} %c1 , 0
544 %r2 = extractvalue {i16, i1} %c2 , 0
545 %r3 = extractvalue {i16, i1} %c3 , 0
546 %r4 = extractvalue {i16, i1} %c4 , 0
547 %r5 = extractvalue {i16, i1} %c5 , 0
548 %r6 = extractvalue {i16, i1} %c6 , 0
549 %r7 = extractvalue {i16, i1} %c7 , 0
550 %r8 = extractvalue {i16, i1} %c8 , 0
551 %r9 = extractvalue {i16, i1} %c9 , 0
552 %r10 = extractvalue {i16, i1} %c10, 0
553 %r11 = extractvalue {i16, i1} %c11, 0
554 %r12 = extractvalue {i16, i1} %c12, 0
555 %r13 = extractvalue {i16, i1} %c13, 0
556 %r14 = extractvalue {i16, i1} %c14, 0
557 %r15 = extractvalue {i16, i1} %c15, 0
558 %r16 = extractvalue {i16, i1} %c16, 0
559 %r17 = extractvalue {i16, i1} %c17, 0
560 %r18 = extractvalue {i16, i1} %c18, 0
561 %r19 = extractvalue {i16, i1} %c19, 0
562 %r20 = extractvalue {i16, i1} %c20, 0
563 %r21 = extractvalue {i16, i1} %c21, 0
564 %r22 = extractvalue {i16, i1} %c22, 0
565 %r23 = extractvalue {i16, i1} %c23, 0
566 %r24 = extractvalue {i16, i1} %c24, 0
567 %r25 = extractvalue {i16, i1} %c25, 0
568 %r26 = extractvalue {i16, i1} %c26, 0
569 %r27 = extractvalue {i16, i1} %c27, 0
570 %r28 = extractvalue {i16, i1} %c28, 0
571 %r29 = extractvalue {i16, i1} %c29, 0
572 %r30 = extractvalue {i16, i1} %c30, 0
573 %r31 = extractvalue {i16, i1} %c31, 0
574 store i16 %r0 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 0 ), align 2
575 store i16 %r1 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 1 ), align 2
576 store i16 %r2 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 2 ), align 2
577 store i16 %r3 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 3 ), align 2
578 store i16 %r4 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 4 ), align 2
579 store i16 %r5 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 5 ), align 2
580 store i16 %r6 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 6 ), align 2
581 store i16 %r7 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 7 ), align 2
582 store i16 %r8 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 8 ), align 2
583 store i16 %r9 , i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 9 ), align 2
584 store i16 %r10, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 10), align 2
585 store i16 %r11, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 11), align 2
586 store i16 %r12, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 12), align 2
587 store i16 %r13, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 13), align 2
588 store i16 %r14, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 14), align 2
589 store i16 %r15, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 15), align 2
590 store i16 %r16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 16), align 2
591 store i16 %r17, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 17), align 2
592 store i16 %r18, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 18), align 2
593 store i16 %r19, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 19), align 2
594 store i16 %r20, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 20), align 2
595 store i16 %r21, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 21), align 2
596 store i16 %r22, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 22), align 2
597 store i16 %r23, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 23), align 2
598 store i16 %r24, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 24), align 2
599 store i16 %r25, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 25), align 2
600 store i16 %r26, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 26), align 2
601 store i16 %r27, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 27), align 2
602 store i16 %r28, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 28), align 2
603 store i16 %r29, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 29), align 2
604 store i16 %r30, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 30), align 2
605 store i16 %r31, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @c16, i32 0, i64 31), align 2
609 define void @add_v64i8() {
610 ; CHECK-LABEL: @add_v64i8(
611 ; CHECK-NEXT: [[A0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0), align 1
612 ; CHECK-NEXT: [[A1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1), align 1
613 ; CHECK-NEXT: [[A2:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2), align 1
614 ; CHECK-NEXT: [[A3:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3), align 1
615 ; CHECK-NEXT: [[A4:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4), align 1
616 ; CHECK-NEXT: [[A5:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5), align 1
617 ; CHECK-NEXT: [[A6:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6), align 1
618 ; CHECK-NEXT: [[A7:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7), align 1
619 ; CHECK-NEXT: [[A8:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8), align 1
620 ; CHECK-NEXT: [[A9:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9), align 1
621 ; CHECK-NEXT: [[A10:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
622 ; CHECK-NEXT: [[A11:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
623 ; CHECK-NEXT: [[A12:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
624 ; CHECK-NEXT: [[A13:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
625 ; CHECK-NEXT: [[A14:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
626 ; CHECK-NEXT: [[A15:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
627 ; CHECK-NEXT: [[A16:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
628 ; CHECK-NEXT: [[A17:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
629 ; CHECK-NEXT: [[A18:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
630 ; CHECK-NEXT: [[A19:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
631 ; CHECK-NEXT: [[A20:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
632 ; CHECK-NEXT: [[A21:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
633 ; CHECK-NEXT: [[A22:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
634 ; CHECK-NEXT: [[A23:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
635 ; CHECK-NEXT: [[A24:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
636 ; CHECK-NEXT: [[A25:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
637 ; CHECK-NEXT: [[A26:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
638 ; CHECK-NEXT: [[A27:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
639 ; CHECK-NEXT: [[A28:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
640 ; CHECK-NEXT: [[A29:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
641 ; CHECK-NEXT: [[A30:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
642 ; CHECK-NEXT: [[A31:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
643 ; CHECK-NEXT: [[A32:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
644 ; CHECK-NEXT: [[A33:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
645 ; CHECK-NEXT: [[A34:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
646 ; CHECK-NEXT: [[A35:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
647 ; CHECK-NEXT: [[A36:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
648 ; CHECK-NEXT: [[A37:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
649 ; CHECK-NEXT: [[A38:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
650 ; CHECK-NEXT: [[A39:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
651 ; CHECK-NEXT: [[A40:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
652 ; CHECK-NEXT: [[A41:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
653 ; CHECK-NEXT: [[A42:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
654 ; CHECK-NEXT: [[A43:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
655 ; CHECK-NEXT: [[A44:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
656 ; CHECK-NEXT: [[A45:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
657 ; CHECK-NEXT: [[A46:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
658 ; CHECK-NEXT: [[A47:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
659 ; CHECK-NEXT: [[A48:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
660 ; CHECK-NEXT: [[A49:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
661 ; CHECK-NEXT: [[A50:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
662 ; CHECK-NEXT: [[A51:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
663 ; CHECK-NEXT: [[A52:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
664 ; CHECK-NEXT: [[A53:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
665 ; CHECK-NEXT: [[A54:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
666 ; CHECK-NEXT: [[A55:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
667 ; CHECK-NEXT: [[A56:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
668 ; CHECK-NEXT: [[A57:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
669 ; CHECK-NEXT: [[A58:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
670 ; CHECK-NEXT: [[A59:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
671 ; CHECK-NEXT: [[A60:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
672 ; CHECK-NEXT: [[A61:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
673 ; CHECK-NEXT: [[A62:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
674 ; CHECK-NEXT: [[A63:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
675 ; CHECK-NEXT: [[B0:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0), align 1
676 ; CHECK-NEXT: [[B1:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1), align 1
677 ; CHECK-NEXT: [[B2:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2), align 1
678 ; CHECK-NEXT: [[B3:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3), align 1
679 ; CHECK-NEXT: [[B4:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4), align 1
680 ; CHECK-NEXT: [[B5:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5), align 1
681 ; CHECK-NEXT: [[B6:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6), align 1
682 ; CHECK-NEXT: [[B7:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7), align 1
683 ; CHECK-NEXT: [[B8:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8), align 1
684 ; CHECK-NEXT: [[B9:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9), align 1
685 ; CHECK-NEXT: [[B10:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
686 ; CHECK-NEXT: [[B11:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
687 ; CHECK-NEXT: [[B12:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
688 ; CHECK-NEXT: [[B13:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
689 ; CHECK-NEXT: [[B14:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
690 ; CHECK-NEXT: [[B15:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
691 ; CHECK-NEXT: [[B16:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
692 ; CHECK-NEXT: [[B17:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
693 ; CHECK-NEXT: [[B18:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
694 ; CHECK-NEXT: [[B19:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
695 ; CHECK-NEXT: [[B20:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
696 ; CHECK-NEXT: [[B21:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
697 ; CHECK-NEXT: [[B22:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
698 ; CHECK-NEXT: [[B23:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
699 ; CHECK-NEXT: [[B24:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
700 ; CHECK-NEXT: [[B25:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
701 ; CHECK-NEXT: [[B26:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
702 ; CHECK-NEXT: [[B27:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
703 ; CHECK-NEXT: [[B28:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
704 ; CHECK-NEXT: [[B29:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
705 ; CHECK-NEXT: [[B30:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
706 ; CHECK-NEXT: [[B31:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
707 ; CHECK-NEXT: [[B32:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
708 ; CHECK-NEXT: [[B33:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
709 ; CHECK-NEXT: [[B34:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
710 ; CHECK-NEXT: [[B35:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
711 ; CHECK-NEXT: [[B36:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
712 ; CHECK-NEXT: [[B37:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
713 ; CHECK-NEXT: [[B38:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
714 ; CHECK-NEXT: [[B39:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
715 ; CHECK-NEXT: [[B40:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
716 ; CHECK-NEXT: [[B41:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
717 ; CHECK-NEXT: [[B42:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
718 ; CHECK-NEXT: [[B43:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
719 ; CHECK-NEXT: [[B44:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
720 ; CHECK-NEXT: [[B45:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
721 ; CHECK-NEXT: [[B46:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
722 ; CHECK-NEXT: [[B47:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
723 ; CHECK-NEXT: [[B48:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
724 ; CHECK-NEXT: [[B49:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
725 ; CHECK-NEXT: [[B50:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
726 ; CHECK-NEXT: [[B51:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
727 ; CHECK-NEXT: [[B52:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
728 ; CHECK-NEXT: [[B53:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
729 ; CHECK-NEXT: [[B54:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
730 ; CHECK-NEXT: [[B55:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
731 ; CHECK-NEXT: [[B56:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
732 ; CHECK-NEXT: [[B57:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
733 ; CHECK-NEXT: [[B58:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
734 ; CHECK-NEXT: [[B59:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
735 ; CHECK-NEXT: [[B60:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
736 ; CHECK-NEXT: [[B61:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
737 ; CHECK-NEXT: [[B62:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
738 ; CHECK-NEXT: [[B63:%.*]] = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
739 ; CHECK-NEXT: [[C0:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A0]], i8 [[B0]])
740 ; CHECK-NEXT: [[C1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A1]], i8 [[B1]])
741 ; CHECK-NEXT: [[C2:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A2]], i8 [[B2]])
742 ; CHECK-NEXT: [[C3:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A3]], i8 [[B3]])
743 ; CHECK-NEXT: [[C4:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A4]], i8 [[B4]])
744 ; CHECK-NEXT: [[C5:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A5]], i8 [[B5]])
745 ; CHECK-NEXT: [[C6:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A6]], i8 [[B6]])
746 ; CHECK-NEXT: [[C7:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A7]], i8 [[B7]])
747 ; CHECK-NEXT: [[C8:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A8]], i8 [[B8]])
748 ; CHECK-NEXT: [[C9:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A9]], i8 [[B9]])
749 ; CHECK-NEXT: [[C10:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A10]], i8 [[B10]])
750 ; CHECK-NEXT: [[C11:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A11]], i8 [[B11]])
751 ; CHECK-NEXT: [[C12:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A12]], i8 [[B12]])
752 ; CHECK-NEXT: [[C13:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A13]], i8 [[B13]])
753 ; CHECK-NEXT: [[C14:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A14]], i8 [[B14]])
754 ; CHECK-NEXT: [[C15:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A15]], i8 [[B15]])
755 ; CHECK-NEXT: [[C16:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A16]], i8 [[B16]])
756 ; CHECK-NEXT: [[C17:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A17]], i8 [[B17]])
757 ; CHECK-NEXT: [[C18:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A18]], i8 [[B18]])
758 ; CHECK-NEXT: [[C19:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A19]], i8 [[B19]])
759 ; CHECK-NEXT: [[C20:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A20]], i8 [[B20]])
760 ; CHECK-NEXT: [[C21:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A21]], i8 [[B21]])
761 ; CHECK-NEXT: [[C22:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A22]], i8 [[B22]])
762 ; CHECK-NEXT: [[C23:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A23]], i8 [[B23]])
763 ; CHECK-NEXT: [[C24:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A24]], i8 [[B24]])
764 ; CHECK-NEXT: [[C25:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A25]], i8 [[B25]])
765 ; CHECK-NEXT: [[C26:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A26]], i8 [[B26]])
766 ; CHECK-NEXT: [[C27:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A27]], i8 [[B27]])
767 ; CHECK-NEXT: [[C28:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A28]], i8 [[B28]])
768 ; CHECK-NEXT: [[C29:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A29]], i8 [[B29]])
769 ; CHECK-NEXT: [[C30:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A30]], i8 [[B30]])
770 ; CHECK-NEXT: [[C31:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A31]], i8 [[B31]])
771 ; CHECK-NEXT: [[C32:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A32]], i8 [[B32]])
772 ; CHECK-NEXT: [[C33:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A33]], i8 [[B33]])
773 ; CHECK-NEXT: [[C34:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A34]], i8 [[B34]])
774 ; CHECK-NEXT: [[C35:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A35]], i8 [[B35]])
775 ; CHECK-NEXT: [[C36:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A36]], i8 [[B36]])
776 ; CHECK-NEXT: [[C37:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A37]], i8 [[B37]])
777 ; CHECK-NEXT: [[C38:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A38]], i8 [[B38]])
778 ; CHECK-NEXT: [[C39:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A39]], i8 [[B39]])
779 ; CHECK-NEXT: [[C40:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A40]], i8 [[B40]])
780 ; CHECK-NEXT: [[C41:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A41]], i8 [[B41]])
781 ; CHECK-NEXT: [[C42:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A42]], i8 [[B42]])
782 ; CHECK-NEXT: [[C43:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A43]], i8 [[B43]])
783 ; CHECK-NEXT: [[C44:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A44]], i8 [[B44]])
784 ; CHECK-NEXT: [[C45:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A45]], i8 [[B45]])
785 ; CHECK-NEXT: [[C46:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A46]], i8 [[B46]])
786 ; CHECK-NEXT: [[C47:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A47]], i8 [[B47]])
787 ; CHECK-NEXT: [[C48:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A48]], i8 [[B48]])
788 ; CHECK-NEXT: [[C49:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A49]], i8 [[B49]])
789 ; CHECK-NEXT: [[C50:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A50]], i8 [[B50]])
790 ; CHECK-NEXT: [[C51:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A51]], i8 [[B51]])
791 ; CHECK-NEXT: [[C52:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A52]], i8 [[B52]])
792 ; CHECK-NEXT: [[C53:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A53]], i8 [[B53]])
793 ; CHECK-NEXT: [[C54:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A54]], i8 [[B54]])
794 ; CHECK-NEXT: [[C55:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A55]], i8 [[B55]])
795 ; CHECK-NEXT: [[C56:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A56]], i8 [[B56]])
796 ; CHECK-NEXT: [[C57:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A57]], i8 [[B57]])
797 ; CHECK-NEXT: [[C58:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A58]], i8 [[B58]])
798 ; CHECK-NEXT: [[C59:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A59]], i8 [[B59]])
799 ; CHECK-NEXT: [[C60:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A60]], i8 [[B60]])
800 ; CHECK-NEXT: [[C61:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A61]], i8 [[B61]])
801 ; CHECK-NEXT: [[C62:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A62]], i8 [[B62]])
802 ; CHECK-NEXT: [[C63:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A63]], i8 [[B63]])
803 ; CHECK-NEXT: [[R0:%.*]] = extractvalue { i8, i1 } [[C0]], 0
804 ; CHECK-NEXT: [[R1:%.*]] = extractvalue { i8, i1 } [[C1]], 0
805 ; CHECK-NEXT: [[R2:%.*]] = extractvalue { i8, i1 } [[C2]], 0
806 ; CHECK-NEXT: [[R3:%.*]] = extractvalue { i8, i1 } [[C3]], 0
807 ; CHECK-NEXT: [[R4:%.*]] = extractvalue { i8, i1 } [[C4]], 0
808 ; CHECK-NEXT: [[R5:%.*]] = extractvalue { i8, i1 } [[C5]], 0
809 ; CHECK-NEXT: [[R6:%.*]] = extractvalue { i8, i1 } [[C6]], 0
810 ; CHECK-NEXT: [[R7:%.*]] = extractvalue { i8, i1 } [[C7]], 0
811 ; CHECK-NEXT: [[R8:%.*]] = extractvalue { i8, i1 } [[C8]], 0
812 ; CHECK-NEXT: [[R9:%.*]] = extractvalue { i8, i1 } [[C9]], 0
813 ; CHECK-NEXT: [[R10:%.*]] = extractvalue { i8, i1 } [[C10]], 0
814 ; CHECK-NEXT: [[R11:%.*]] = extractvalue { i8, i1 } [[C11]], 0
815 ; CHECK-NEXT: [[R12:%.*]] = extractvalue { i8, i1 } [[C12]], 0
816 ; CHECK-NEXT: [[R13:%.*]] = extractvalue { i8, i1 } [[C13]], 0
817 ; CHECK-NEXT: [[R14:%.*]] = extractvalue { i8, i1 } [[C14]], 0
818 ; CHECK-NEXT: [[R15:%.*]] = extractvalue { i8, i1 } [[C15]], 0
819 ; CHECK-NEXT: [[R16:%.*]] = extractvalue { i8, i1 } [[C16]], 0
820 ; CHECK-NEXT: [[R17:%.*]] = extractvalue { i8, i1 } [[C17]], 0
821 ; CHECK-NEXT: [[R18:%.*]] = extractvalue { i8, i1 } [[C18]], 0
822 ; CHECK-NEXT: [[R19:%.*]] = extractvalue { i8, i1 } [[C19]], 0
823 ; CHECK-NEXT: [[R20:%.*]] = extractvalue { i8, i1 } [[C20]], 0
824 ; CHECK-NEXT: [[R21:%.*]] = extractvalue { i8, i1 } [[C21]], 0
825 ; CHECK-NEXT: [[R22:%.*]] = extractvalue { i8, i1 } [[C22]], 0
826 ; CHECK-NEXT: [[R23:%.*]] = extractvalue { i8, i1 } [[C23]], 0
827 ; CHECK-NEXT: [[R24:%.*]] = extractvalue { i8, i1 } [[C24]], 0
828 ; CHECK-NEXT: [[R25:%.*]] = extractvalue { i8, i1 } [[C25]], 0
829 ; CHECK-NEXT: [[R26:%.*]] = extractvalue { i8, i1 } [[C26]], 0
830 ; CHECK-NEXT: [[R27:%.*]] = extractvalue { i8, i1 } [[C27]], 0
831 ; CHECK-NEXT: [[R28:%.*]] = extractvalue { i8, i1 } [[C28]], 0
832 ; CHECK-NEXT: [[R29:%.*]] = extractvalue { i8, i1 } [[C29]], 0
833 ; CHECK-NEXT: [[R30:%.*]] = extractvalue { i8, i1 } [[C30]], 0
834 ; CHECK-NEXT: [[R31:%.*]] = extractvalue { i8, i1 } [[C31]], 0
835 ; CHECK-NEXT: [[R32:%.*]] = extractvalue { i8, i1 } [[C32]], 0
836 ; CHECK-NEXT: [[R33:%.*]] = extractvalue { i8, i1 } [[C33]], 0
837 ; CHECK-NEXT: [[R34:%.*]] = extractvalue { i8, i1 } [[C34]], 0
838 ; CHECK-NEXT: [[R35:%.*]] = extractvalue { i8, i1 } [[C35]], 0
839 ; CHECK-NEXT: [[R36:%.*]] = extractvalue { i8, i1 } [[C36]], 0
840 ; CHECK-NEXT: [[R37:%.*]] = extractvalue { i8, i1 } [[C37]], 0
841 ; CHECK-NEXT: [[R38:%.*]] = extractvalue { i8, i1 } [[C38]], 0
842 ; CHECK-NEXT: [[R39:%.*]] = extractvalue { i8, i1 } [[C39]], 0
843 ; CHECK-NEXT: [[R40:%.*]] = extractvalue { i8, i1 } [[C40]], 0
844 ; CHECK-NEXT: [[R41:%.*]] = extractvalue { i8, i1 } [[C41]], 0
845 ; CHECK-NEXT: [[R42:%.*]] = extractvalue { i8, i1 } [[C42]], 0
846 ; CHECK-NEXT: [[R43:%.*]] = extractvalue { i8, i1 } [[C43]], 0
847 ; CHECK-NEXT: [[R44:%.*]] = extractvalue { i8, i1 } [[C44]], 0
848 ; CHECK-NEXT: [[R45:%.*]] = extractvalue { i8, i1 } [[C45]], 0
849 ; CHECK-NEXT: [[R46:%.*]] = extractvalue { i8, i1 } [[C46]], 0
850 ; CHECK-NEXT: [[R47:%.*]] = extractvalue { i8, i1 } [[C47]], 0
851 ; CHECK-NEXT: [[R48:%.*]] = extractvalue { i8, i1 } [[C48]], 0
852 ; CHECK-NEXT: [[R49:%.*]] = extractvalue { i8, i1 } [[C49]], 0
853 ; CHECK-NEXT: [[R50:%.*]] = extractvalue { i8, i1 } [[C50]], 0
854 ; CHECK-NEXT: [[R51:%.*]] = extractvalue { i8, i1 } [[C51]], 0
855 ; CHECK-NEXT: [[R52:%.*]] = extractvalue { i8, i1 } [[C52]], 0
856 ; CHECK-NEXT: [[R53:%.*]] = extractvalue { i8, i1 } [[C53]], 0
857 ; CHECK-NEXT: [[R54:%.*]] = extractvalue { i8, i1 } [[C54]], 0
858 ; CHECK-NEXT: [[R55:%.*]] = extractvalue { i8, i1 } [[C55]], 0
859 ; CHECK-NEXT: [[R56:%.*]] = extractvalue { i8, i1 } [[C56]], 0
860 ; CHECK-NEXT: [[R57:%.*]] = extractvalue { i8, i1 } [[C57]], 0
861 ; CHECK-NEXT: [[R58:%.*]] = extractvalue { i8, i1 } [[C58]], 0
862 ; CHECK-NEXT: [[R59:%.*]] = extractvalue { i8, i1 } [[C59]], 0
863 ; CHECK-NEXT: [[R60:%.*]] = extractvalue { i8, i1 } [[C60]], 0
864 ; CHECK-NEXT: [[R61:%.*]] = extractvalue { i8, i1 } [[C61]], 0
865 ; CHECK-NEXT: [[R62:%.*]] = extractvalue { i8, i1 } [[C62]], 0
866 ; CHECK-NEXT: [[R63:%.*]] = extractvalue { i8, i1 } [[C63]], 0
867 ; CHECK-NEXT: store i8 [[R0]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0), align 1
868 ; CHECK-NEXT: store i8 [[R1]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1), align 1
869 ; CHECK-NEXT: store i8 [[R2]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2), align 1
870 ; CHECK-NEXT: store i8 [[R3]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3), align 1
871 ; CHECK-NEXT: store i8 [[R4]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4), align 1
872 ; CHECK-NEXT: store i8 [[R5]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5), align 1
873 ; CHECK-NEXT: store i8 [[R6]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6), align 1
874 ; CHECK-NEXT: store i8 [[R7]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7), align 1
875 ; CHECK-NEXT: store i8 [[R8]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8), align 1
876 ; CHECK-NEXT: store i8 [[R9]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9), align 1
877 ; CHECK-NEXT: store i8 [[R10]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
878 ; CHECK-NEXT: store i8 [[R11]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
879 ; CHECK-NEXT: store i8 [[R12]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
880 ; CHECK-NEXT: store i8 [[R13]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
881 ; CHECK-NEXT: store i8 [[R14]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
882 ; CHECK-NEXT: store i8 [[R15]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
883 ; CHECK-NEXT: store i8 [[R16]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
884 ; CHECK-NEXT: store i8 [[R17]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
885 ; CHECK-NEXT: store i8 [[R18]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
886 ; CHECK-NEXT: store i8 [[R19]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
887 ; CHECK-NEXT: store i8 [[R20]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
888 ; CHECK-NEXT: store i8 [[R21]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
889 ; CHECK-NEXT: store i8 [[R22]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
890 ; CHECK-NEXT: store i8 [[R23]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
891 ; CHECK-NEXT: store i8 [[R24]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
892 ; CHECK-NEXT: store i8 [[R25]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
893 ; CHECK-NEXT: store i8 [[R26]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
894 ; CHECK-NEXT: store i8 [[R27]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
895 ; CHECK-NEXT: store i8 [[R28]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
896 ; CHECK-NEXT: store i8 [[R29]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
897 ; CHECK-NEXT: store i8 [[R30]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
898 ; CHECK-NEXT: store i8 [[R31]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
899 ; CHECK-NEXT: store i8 [[R32]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
900 ; CHECK-NEXT: store i8 [[R33]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
901 ; CHECK-NEXT: store i8 [[R34]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
902 ; CHECK-NEXT: store i8 [[R35]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
903 ; CHECK-NEXT: store i8 [[R36]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
904 ; CHECK-NEXT: store i8 [[R37]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
905 ; CHECK-NEXT: store i8 [[R38]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
906 ; CHECK-NEXT: store i8 [[R39]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
907 ; CHECK-NEXT: store i8 [[R40]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
908 ; CHECK-NEXT: store i8 [[R41]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
909 ; CHECK-NEXT: store i8 [[R42]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
910 ; CHECK-NEXT: store i8 [[R43]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
911 ; CHECK-NEXT: store i8 [[R44]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
912 ; CHECK-NEXT: store i8 [[R45]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
913 ; CHECK-NEXT: store i8 [[R46]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
914 ; CHECK-NEXT: store i8 [[R47]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
915 ; CHECK-NEXT: store i8 [[R48]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
916 ; CHECK-NEXT: store i8 [[R49]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
917 ; CHECK-NEXT: store i8 [[R50]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
918 ; CHECK-NEXT: store i8 [[R51]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
919 ; CHECK-NEXT: store i8 [[R52]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
920 ; CHECK-NEXT: store i8 [[R53]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
921 ; CHECK-NEXT: store i8 [[R54]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
922 ; CHECK-NEXT: store i8 [[R55]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
923 ; CHECK-NEXT: store i8 [[R56]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
924 ; CHECK-NEXT: store i8 [[R57]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
925 ; CHECK-NEXT: store i8 [[R58]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
926 ; CHECK-NEXT: store i8 [[R59]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
927 ; CHECK-NEXT: store i8 [[R60]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
928 ; CHECK-NEXT: store i8 [[R61]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
929 ; CHECK-NEXT: store i8 [[R62]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
930 ; CHECK-NEXT: store i8 [[R63]], i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1
931 ; CHECK-NEXT: ret void
933 %a0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 0 ), align 1
934 %a1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 1 ), align 1
935 %a2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 2 ), align 1
936 %a3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 3 ), align 1
937 %a4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 4 ), align 1
938 %a5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 5 ), align 1
939 %a6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 6 ), align 1
940 %a7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 7 ), align 1
941 %a8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 8 ), align 1
942 %a9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 9 ), align 1
943 %a10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 10), align 1
944 %a11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 11), align 1
945 %a12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 12), align 1
946 %a13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 13), align 1
947 %a14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 14), align 1
948 %a15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 15), align 1
949 %a16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 16), align 1
950 %a17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 17), align 1
951 %a18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 18), align 1
952 %a19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 19), align 1
953 %a20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 20), align 1
954 %a21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 21), align 1
955 %a22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 22), align 1
956 %a23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 23), align 1
957 %a24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 24), align 1
958 %a25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 25), align 1
959 %a26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 26), align 1
960 %a27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 27), align 1
961 %a28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 28), align 1
962 %a29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 29), align 1
963 %a30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 30), align 1
964 %a31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 31), align 1
965 %a32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 32), align 1
966 %a33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 33), align 1
967 %a34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 34), align 1
968 %a35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 35), align 1
969 %a36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 36), align 1
970 %a37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 37), align 1
971 %a38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 38), align 1
972 %a39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 39), align 1
973 %a40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 40), align 1
974 %a41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 41), align 1
975 %a42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 42), align 1
976 %a43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 43), align 1
977 %a44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 44), align 1
978 %a45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 45), align 1
979 %a46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 46), align 1
980 %a47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 47), align 1
981 %a48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 48), align 1
982 %a49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 49), align 1
983 %a50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 50), align 1
984 %a51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 51), align 1
985 %a52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 52), align 1
986 %a53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 53), align 1
987 %a54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 54), align 1
988 %a55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 55), align 1
989 %a56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 56), align 1
990 %a57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 57), align 1
991 %a58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 58), align 1
992 %a59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 59), align 1
993 %a60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 60), align 1
994 %a61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 61), align 1
995 %a62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 62), align 1
996 %a63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @a8, i32 0, i64 63), align 1
997 %b0 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 0 ), align 1
998 %b1 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 1 ), align 1
999 %b2 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 2 ), align 1
1000 %b3 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 3 ), align 1
1001 %b4 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 4 ), align 1
1002 %b5 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 5 ), align 1
1003 %b6 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 6 ), align 1
1004 %b7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 7 ), align 1
1005 %b8 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 8 ), align 1
1006 %b9 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 9 ), align 1
1007 %b10 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 10), align 1
1008 %b11 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 11), align 1
1009 %b12 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 12), align 1
1010 %b13 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 13), align 1
1011 %b14 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 14), align 1
1012 %b15 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 15), align 1
1013 %b16 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 16), align 1
1014 %b17 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 17), align 1
1015 %b18 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 18), align 1
1016 %b19 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 19), align 1
1017 %b20 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 20), align 1
1018 %b21 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 21), align 1
1019 %b22 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 22), align 1
1020 %b23 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 23), align 1
1021 %b24 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 24), align 1
1022 %b25 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 25), align 1
1023 %b26 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 26), align 1
1024 %b27 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 27), align 1
1025 %b28 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 28), align 1
1026 %b29 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 29), align 1
1027 %b30 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 30), align 1
1028 %b31 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 31), align 1
1029 %b32 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 32), align 1
1030 %b33 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 33), align 1
1031 %b34 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 34), align 1
1032 %b35 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 35), align 1
1033 %b36 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 36), align 1
1034 %b37 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 37), align 1
1035 %b38 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 38), align 1
1036 %b39 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 39), align 1
1037 %b40 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 40), align 1
1038 %b41 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 41), align 1
1039 %b42 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 42), align 1
1040 %b43 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 43), align 1
1041 %b44 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 44), align 1
1042 %b45 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 45), align 1
1043 %b46 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 46), align 1
1044 %b47 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 47), align 1
1045 %b48 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 48), align 1
1046 %b49 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 49), align 1
1047 %b50 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 50), align 1
1048 %b51 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 51), align 1
1049 %b52 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 52), align 1
1050 %b53 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 53), align 1
1051 %b54 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 54), align 1
1052 %b55 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 55), align 1
1053 %b56 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 56), align 1
1054 %b57 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 57), align 1
1055 %b58 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 58), align 1
1056 %b59 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 59), align 1
1057 %b60 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 60), align 1
1058 %b61 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 61), align 1
1059 %b62 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 62), align 1
1060 %b63 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @b8, i32 0, i64 63), align 1
1061 %c0 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a0 , i8 %b0 )
1062 %c1 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a1 , i8 %b1 )
1063 %c2 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a2 , i8 %b2 )
1064 %c3 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a3 , i8 %b3 )
1065 %c4 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a4 , i8 %b4 )
1066 %c5 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a5 , i8 %b5 )
1067 %c6 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a6 , i8 %b6 )
1068 %c7 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a7 , i8 %b7 )
1069 %c8 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a8 , i8 %b8 )
1070 %c9 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a9 , i8 %b9 )
1071 %c10 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a10, i8 %b10)
1072 %c11 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a11, i8 %b11)
1073 %c12 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a12, i8 %b12)
1074 %c13 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a13, i8 %b13)
1075 %c14 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a14, i8 %b14)
1076 %c15 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a15, i8 %b15)
1077 %c16 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a16, i8 %b16)
1078 %c17 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a17, i8 %b17)
1079 %c18 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a18, i8 %b18)
1080 %c19 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a19, i8 %b19)
1081 %c20 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a20, i8 %b20)
1082 %c21 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a21, i8 %b21)
1083 %c22 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a22, i8 %b22)
1084 %c23 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a23, i8 %b23)
1085 %c24 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a24, i8 %b24)
1086 %c25 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a25, i8 %b25)
1087 %c26 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a26, i8 %b26)
1088 %c27 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a27, i8 %b27)
1089 %c28 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a28, i8 %b28)
1090 %c29 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a29, i8 %b29)
1091 %c30 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a30, i8 %b30)
1092 %c31 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a31, i8 %b31)
1093 %c32 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a32, i8 %b32)
1094 %c33 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a33, i8 %b33)
1095 %c34 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a34, i8 %b34)
1096 %c35 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a35, i8 %b35)
1097 %c36 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a36, i8 %b36)
1098 %c37 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a37, i8 %b37)
1099 %c38 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a38, i8 %b38)
1100 %c39 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a39, i8 %b39)
1101 %c40 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a40, i8 %b40)
1102 %c41 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a41, i8 %b41)
1103 %c42 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a42, i8 %b42)
1104 %c43 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a43, i8 %b43)
1105 %c44 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a44, i8 %b44)
1106 %c45 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a45, i8 %b45)
1107 %c46 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a46, i8 %b46)
1108 %c47 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a47, i8 %b47)
1109 %c48 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a48, i8 %b48)
1110 %c49 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a49, i8 %b49)
1111 %c50 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a50, i8 %b50)
1112 %c51 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a51, i8 %b51)
1113 %c52 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a52, i8 %b52)
1114 %c53 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a53, i8 %b53)
1115 %c54 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a54, i8 %b54)
1116 %c55 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a55, i8 %b55)
1117 %c56 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a56, i8 %b56)
1118 %c57 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a57, i8 %b57)
1119 %c58 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a58, i8 %b58)
1120 %c59 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a59, i8 %b59)
1121 %c60 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a60, i8 %b60)
1122 %c61 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a61, i8 %b61)
1123 %c62 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a62, i8 %b62)
1124 %c63 = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a63, i8 %b63)
1125 %r0 = extractvalue {i8, i1} %c0 , 0
1126 %r1 = extractvalue {i8, i1} %c1 , 0
1127 %r2 = extractvalue {i8, i1} %c2 , 0
1128 %r3 = extractvalue {i8, i1} %c3 , 0
1129 %r4 = extractvalue {i8, i1} %c4 , 0
1130 %r5 = extractvalue {i8, i1} %c5 , 0
1131 %r6 = extractvalue {i8, i1} %c6 , 0
1132 %r7 = extractvalue {i8, i1} %c7 , 0
1133 %r8 = extractvalue {i8, i1} %c8 , 0
1134 %r9 = extractvalue {i8, i1} %c9 , 0
1135 %r10 = extractvalue {i8, i1} %c10, 0
1136 %r11 = extractvalue {i8, i1} %c11, 0
1137 %r12 = extractvalue {i8, i1} %c12, 0
1138 %r13 = extractvalue {i8, i1} %c13, 0
1139 %r14 = extractvalue {i8, i1} %c14, 0
1140 %r15 = extractvalue {i8, i1} %c15, 0
1141 %r16 = extractvalue {i8, i1} %c16, 0
1142 %r17 = extractvalue {i8, i1} %c17, 0
1143 %r18 = extractvalue {i8, i1} %c18, 0
1144 %r19 = extractvalue {i8, i1} %c19, 0
1145 %r20 = extractvalue {i8, i1} %c20, 0
1146 %r21 = extractvalue {i8, i1} %c21, 0
1147 %r22 = extractvalue {i8, i1} %c22, 0
1148 %r23 = extractvalue {i8, i1} %c23, 0
1149 %r24 = extractvalue {i8, i1} %c24, 0
1150 %r25 = extractvalue {i8, i1} %c25, 0
1151 %r26 = extractvalue {i8, i1} %c26, 0
1152 %r27 = extractvalue {i8, i1} %c27, 0
1153 %r28 = extractvalue {i8, i1} %c28, 0
1154 %r29 = extractvalue {i8, i1} %c29, 0
1155 %r30 = extractvalue {i8, i1} %c30, 0
1156 %r31 = extractvalue {i8, i1} %c31, 0
1157 %r32 = extractvalue {i8, i1} %c32, 0
1158 %r33 = extractvalue {i8, i1} %c33, 0
1159 %r34 = extractvalue {i8, i1} %c34, 0
1160 %r35 = extractvalue {i8, i1} %c35, 0
1161 %r36 = extractvalue {i8, i1} %c36, 0
1162 %r37 = extractvalue {i8, i1} %c37, 0
1163 %r38 = extractvalue {i8, i1} %c38, 0
1164 %r39 = extractvalue {i8, i1} %c39, 0
1165 %r40 = extractvalue {i8, i1} %c40, 0
1166 %r41 = extractvalue {i8, i1} %c41, 0
1167 %r42 = extractvalue {i8, i1} %c42, 0
1168 %r43 = extractvalue {i8, i1} %c43, 0
1169 %r44 = extractvalue {i8, i1} %c44, 0
1170 %r45 = extractvalue {i8, i1} %c45, 0
1171 %r46 = extractvalue {i8, i1} %c46, 0
1172 %r47 = extractvalue {i8, i1} %c47, 0
1173 %r48 = extractvalue {i8, i1} %c48, 0
1174 %r49 = extractvalue {i8, i1} %c49, 0
1175 %r50 = extractvalue {i8, i1} %c50, 0
1176 %r51 = extractvalue {i8, i1} %c51, 0
1177 %r52 = extractvalue {i8, i1} %c52, 0
1178 %r53 = extractvalue {i8, i1} %c53, 0
1179 %r54 = extractvalue {i8, i1} %c54, 0
1180 %r55 = extractvalue {i8, i1} %c55, 0
1181 %r56 = extractvalue {i8, i1} %c56, 0
1182 %r57 = extractvalue {i8, i1} %c57, 0
1183 %r58 = extractvalue {i8, i1} %c58, 0
1184 %r59 = extractvalue {i8, i1} %c59, 0
1185 %r60 = extractvalue {i8, i1} %c60, 0
1186 %r61 = extractvalue {i8, i1} %c61, 0
1187 %r62 = extractvalue {i8, i1} %c62, 0
1188 %r63 = extractvalue {i8, i1} %c63, 0
1189 store i8 %r0 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 0 ), align 1
1190 store i8 %r1 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 1 ), align 1
1191 store i8 %r2 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 2 ), align 1
1192 store i8 %r3 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 3 ), align 1
1193 store i8 %r4 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 4 ), align 1
1194 store i8 %r5 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 5 ), align 1
1195 store i8 %r6 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 6 ), align 1
1196 store i8 %r7 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 7 ), align 1
1197 store i8 %r8 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 8 ), align 1
1198 store i8 %r9 , i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 9 ), align 1
1199 store i8 %r10, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 10), align 1
1200 store i8 %r11, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 11), align 1
1201 store i8 %r12, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 12), align 1
1202 store i8 %r13, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 13), align 1
1203 store i8 %r14, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 14), align 1
1204 store i8 %r15, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 15), align 1
1205 store i8 %r16, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 16), align 1
1206 store i8 %r17, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 17), align 1
1207 store i8 %r18, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 18), align 1
1208 store i8 %r19, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 19), align 1
1209 store i8 %r20, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 20), align 1
1210 store i8 %r21, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 21), align 1
1211 store i8 %r22, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 22), align 1
1212 store i8 %r23, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 23), align 1
1213 store i8 %r24, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 24), align 1
1214 store i8 %r25, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 25), align 1
1215 store i8 %r26, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 26), align 1
1216 store i8 %r27, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 27), align 1
1217 store i8 %r28, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 28), align 1
1218 store i8 %r29, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 29), align 1
1219 store i8 %r30, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 30), align 1
1220 store i8 %r31, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 31), align 1
1221 store i8 %r32, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 32), align 1
1222 store i8 %r33, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 33), align 1
1223 store i8 %r34, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 34), align 1
1224 store i8 %r35, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 35), align 1
1225 store i8 %r36, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 36), align 1
1226 store i8 %r37, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 37), align 1
1227 store i8 %r38, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 38), align 1
1228 store i8 %r39, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 39), align 1
1229 store i8 %r40, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 40), align 1
1230 store i8 %r41, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 41), align 1
1231 store i8 %r42, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 42), align 1
1232 store i8 %r43, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 43), align 1
1233 store i8 %r44, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 44), align 1
1234 store i8 %r45, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 45), align 1
1235 store i8 %r46, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 46), align 1
1236 store i8 %r47, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 47), align 1
1237 store i8 %r48, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 48), align 1
1238 store i8 %r49, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 49), align 1
1239 store i8 %r50, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 50), align 1
1240 store i8 %r51, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 51), align 1
1241 store i8 %r52, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 52), align 1
1242 store i8 %r53, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 53), align 1
1243 store i8 %r54, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 54), align 1
1244 store i8 %r55, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 55), align 1
1245 store i8 %r56, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 56), align 1
1246 store i8 %r57, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 57), align 1
1247 store i8 %r58, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 58), align 1
1248 store i8 %r59, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 59), align 1
1249 store i8 %r60, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 60), align 1
1250 store i8 %r61, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 61), align 1
1251 store i8 %r62, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 62), align 1
1252 store i8 %r63, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @c8, i32 0, i64 63), align 1