// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -triple arm64-apple-ios9 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK
// RUN: %clang_cc1 -triple armv7-apple-ios9 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK
// RUN: %clang_cc1 -triple x86_64-apple-macos10.13 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK
6 typedef __fp16 half4
__attribute__ ((vector_size (8)));
7 typedef short short4
__attribute__ ((vector_size (8)));
// CHECK-LABEL: testFP16Vec0
// CHECK: %[[V0:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
// CHECK: %[[V1:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
// CHECK: %[[ADD:.*]] = fadd <4 x float> %[[CONV]], %[[CONV1]]
// CHECK: %[[CONV2:.*]] = fptrunc <4 x float> %[[ADD]] to <4 x half>
// CHECK: store <4 x half> %[[CONV2]], ptr @hv0, align 8
// CHECK: %[[V2:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
// CHECK: %[[V3:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
// CHECK: %[[SUB:.*]] = fsub <4 x float> %[[CONV3]], %[[CONV4]]
// CHECK: %[[CONV5:.*]] = fptrunc <4 x float> %[[SUB]] to <4 x half>
// CHECK: store <4 x half> %[[CONV5]], ptr @hv0, align 8
// CHECK: %[[V4:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV6:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
// CHECK: %[[V5:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV7:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
// CHECK: %[[MUL:.*]] = fmul <4 x float> %[[CONV6]], %[[CONV7]]
// CHECK: %[[CONV8:.*]] = fptrunc <4 x float> %[[MUL]] to <4 x half>
// CHECK: store <4 x half> %[[CONV8]], ptr @hv0, align 8
// CHECK: %[[V6:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
// CHECK: %[[V7:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV10:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
// CHECK: %[[DIV:.*]] = fdiv <4 x float> %[[CONV9]], %[[CONV10]]
// CHECK: %[[CONV11:.*]] = fptrunc <4 x float> %[[DIV]] to <4 x half>
// CHECK: store <4 x half> %[[CONV11]], ptr @hv0, align 8
// CHECK-LABEL: testFP16Vec1
// CHECK: %[[V0:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
// CHECK: %[[V1:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
// CHECK: %[[ADD:.*]] = fadd <4 x float> %[[CONV1]], %[[CONV]]
// CHECK: %[[CONV2:.*]] = fptrunc <4 x float> %[[ADD]] to <4 x half>
// CHECK: store <4 x half> %[[CONV2]], ptr @hv0, align 8
// CHECK: %[[V2:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
// CHECK: %[[V3:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
// CHECK: %[[SUB:.*]] = fsub <4 x float> %[[CONV4]], %[[CONV3]]
// CHECK: %[[CONV5:.*]] = fptrunc <4 x float> %[[SUB]] to <4 x half>
// CHECK: store <4 x half> %[[CONV5]], ptr @hv0, align 8
// CHECK: %[[V4:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV6:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
// CHECK: %[[V5:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV7:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
// CHECK: %[[MUL:.*]] = fmul <4 x float> %[[CONV7]], %[[CONV6]]
// CHECK: %[[CONV8:.*]] = fptrunc <4 x float> %[[MUL]] to <4 x half>
// CHECK: store <4 x half> %[[CONV8]], ptr @hv0, align 8
// CHECK: %[[V6:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
// CHECK: %[[V7:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV10:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
// CHECK: %[[DIV:.*]] = fdiv <4 x float> %[[CONV10]], %[[CONV9]]
// CHECK: %[[CONV11:.*]] = fptrunc <4 x float> %[[DIV]] to <4 x half>
// CHECK: store <4 x half> %[[CONV11]], ptr @hv0, align 8
// CHECK-LABEL: testFP16Vec2
// CHECK: %[[CADDR:.*]] = alloca i32, align 4
// CHECK: store i32 %[[C:.*]], ptr %[[CADDR]], align 4
// CHECK: %[[V0:.*]] = load i32, ptr %[[CADDR]], align 4
// CHECK: %[[TOBOOL:.*]] = icmp ne i32 %[[V0]], 0
// CHECK: br i1 %[[TOBOOL]], label %{{.*}}, label %{{.*}}
// CHECK: %[[V1:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: br label %{{.*}}
// CHECK: %[[V2:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: br label %{{.*}}
// CHECK: %[[COND:.*]] = phi <4 x half> [ %[[V1]], %{{.*}} ], [ %[[V2]], %{{.*}} ]
// CHECK: store <4 x half> %[[COND]], ptr @hv0, align 8
102 void testFP16Vec2(int c
) {
// CHECK-LABEL: testFP16Vec3
// CHECK: %[[V0:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV:.*]] = fpext <4 x half> %[[V0]] to <4 x float>
// CHECK: %[[V1:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV1:.*]] = fpext <4 x half> %[[V1]] to <4 x float>
// CHECK: %[[CMP:.*]] = fcmp oeq <4 x float> %[[CONV]], %[[CONV1]]
// CHECK: %[[SEXT:.*]] = sext <4 x i1> %[[CMP]] to <4 x i32>
// CHECK: %[[CONV2:.*]] = trunc <4 x i32> %[[SEXT]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV2]], ptr @sv0, align 8
// CHECK: %[[V2:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV3:.*]] = fpext <4 x half> %[[V2]] to <4 x float>
// CHECK: %[[V3:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV4:.*]] = fpext <4 x half> %[[V3]] to <4 x float>
// CHECK: %[[CMP5:.*]] = fcmp une <4 x float> %[[CONV3]], %[[CONV4]]
// CHECK: %[[SEXT6:.*]] = sext <4 x i1> %[[CMP5]] to <4 x i32>
// CHECK: %[[CONV7:.*]] = trunc <4 x i32> %[[SEXT6]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV7]], ptr @sv0, align 8
// CHECK: %[[V4:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV8:.*]] = fpext <4 x half> %[[V4]] to <4 x float>
// CHECK: %[[V5:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV9:.*]] = fpext <4 x half> %[[V5]] to <4 x float>
// CHECK: %[[CMP10:.*]] = fcmp olt <4 x float> %[[CONV8]], %[[CONV9]]
// CHECK: %[[SEXT11:.*]] = sext <4 x i1> %[[CMP10]] to <4 x i32>
// CHECK: %[[CONV12:.*]] = trunc <4 x i32> %[[SEXT11]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV12]], ptr @sv0, align 8
// CHECK: %[[V6:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV13:.*]] = fpext <4 x half> %[[V6]] to <4 x float>
// CHECK: %[[V7:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV14:.*]] = fpext <4 x half> %[[V7]] to <4 x float>
// CHECK: %[[CMP15:.*]] = fcmp ogt <4 x float> %[[CONV13]], %[[CONV14]]
// CHECK: %[[SEXT16:.*]] = sext <4 x i1> %[[CMP15]] to <4 x i32>
// CHECK: %[[CONV17:.*]] = trunc <4 x i32> %[[SEXT16]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV17]], ptr @sv0, align 8
// CHECK: %[[V8:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV18:.*]] = fpext <4 x half> %[[V8]] to <4 x float>
// CHECK: %[[V9:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV19:.*]] = fpext <4 x half> %[[V9]] to <4 x float>
// CHECK: %[[CMP20:.*]] = fcmp ole <4 x float> %[[CONV18]], %[[CONV19]]
// CHECK: %[[SEXT21:.*]] = sext <4 x i1> %[[CMP20]] to <4 x i32>
// CHECK: %[[CONV22:.*]] = trunc <4 x i32> %[[SEXT21]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV22]], ptr @sv0, align 8
// CHECK: %[[V10:.*]] = load <4 x half>, ptr @hv0, align 8
// CHECK: %[[CONV23:.*]] = fpext <4 x half> %[[V10]] to <4 x float>
// CHECK: %[[V11:.*]] = load <4 x half>, ptr @hv1, align 8
// CHECK: %[[CONV24:.*]] = fpext <4 x half> %[[V11]] to <4 x float>
// CHECK: %[[CMP25:.*]] = fcmp oge <4 x float> %[[CONV23]], %[[CONV24]]
// CHECK: %[[SEXT26:.*]] = sext <4 x i1> %[[CMP25]] to <4 x i32>
// CHECK: %[[CONV27:.*]] = trunc <4 x i32> %[[SEXT26]] to <4 x i16>
// CHECK: store <4 x i16> %[[CONV27]], ptr @sv0, align 8
156 void testFP16Vec3() {