; RUN: llc -O0 < %s | FileCheck %s
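; This test checks that floating-point and integer operations the MSP430
; backend cannot lower to native instructions are emitted as calls to the
; MSPABI (MSP430 EABI) runtime helper functions.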
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"
@g_double = global double 123.0, align 8
@g_float = global float 123.0, align 8
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8
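; Conversions between the two floating-point types use the MSPABI
; convert helpers.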
define float @d2f() #0 {
entry:
; CHECK-LABEL: d2f:
; CHECK: call #__mspabi_cvtdf
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptrunc double %0 to float
  ret float %1
}

define double @f2d() #0 {
entry:
; CHECK-LABEL: f2d:
; CHECK: call #__mspabi_cvtfd
  %0 = load volatile float, float* @g_float, align 8
  %1 = fpext float %0 to double
  ret double %1
}
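; Floating-point to integer conversions map to the __mspabi_fix* helpers;
; the suffix encodes the source type (d/f) and the destination width and
; signedness.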
define i32 @d2l() #0 {
entry:
; CHECK-LABEL: d2l:
; CHECK: call #__mspabi_fixdli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i32
  ret i32 %1
}

define i64 @d2ll() #0 {
entry:
; CHECK-LABEL: d2ll:
; CHECK: call #__mspabi_fixdlli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i64
  ret i64 %1
}

define i32 @d2ul() #0 {
entry:
; CHECK-LABEL: d2ul:
; CHECK: call #__mspabi_fixdul
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i32
  ret i32 %1
}

define i64 @d2ull() #0 {
entry:
; CHECK-LABEL: d2ull:
; CHECK: call #__mspabi_fixdull
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i64
  ret i64 %1
}

define i32 @f2l() #0 {
entry:
; CHECK-LABEL: f2l:
; CHECK: call #__mspabi_fixfli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i32
  ret i32 %1
}

define i64 @f2ll() #0 {
entry:
; CHECK-LABEL: f2ll:
; CHECK: call #__mspabi_fixflli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i64
  ret i64 %1
}

define i32 @f2ul() #0 {
entry:
; CHECK-LABEL: f2ul:
; CHECK: call #__mspabi_fixful
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i32
  ret i32 %1
}

define i64 @f2ull() #0 {
entry:
; CHECK-LABEL: f2ull:
; CHECK: call #__mspabi_fixfull
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i64
  ret i64 %1
}
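; Integer to floating-point conversions map to the __mspabi_flt* helpers,
; with the same suffix convention in the opposite direction.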
define double @l2d() #0 {
entry:
; CHECK-LABEL: l2d:
; CHECK: call #__mspabi_fltlid
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to double
  ret double %1
}

define double @ll2d() #0 {
entry:
; CHECK-LABEL: ll2d:
; CHECK: call #__mspabi_fltllid
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to double
  ret double %1
}

define double @ul2d() #0 {
entry:
; CHECK-LABEL: ul2d:
; CHECK: call #__mspabi_fltuld
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to double
  ret double %1
}

define double @ull2d() #0 {
entry:
; CHECK-LABEL: ull2d:
; CHECK: call #__mspabi_fltulld
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to double
  ret double %1
}

define float @l2f() #0 {
entry:
; CHECK-LABEL: l2f:
; CHECK: call #__mspabi_fltlif
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to float
  ret float %1
}

define float @ll2f() #0 {
entry:
; CHECK-LABEL: ll2f:
; CHECK: call #__mspabi_fltllif
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to float
  ret float %1
}

define float @ul2f() #0 {
entry:
; CHECK-LABEL: ul2f:
; CHECK: call #__mspabi_fltulf
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to float
  ret float %1
}

define float @ull2f() #0 {
entry:
; CHECK-LABEL: ull2f:
; CHECK: call #__mspabi_fltullf
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to float
  ret float %1
}
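; Every fcmp predicate lowers to the same comparison helper
; (__mspabi_cmpd for double, __mspabi_cmpf for float); the backend then
; interprets the helper's return value to realize the specific predicate.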
define i1 @cmpd_oeq() #0 {
entry:
; CHECK-LABEL: cmpd_oeq:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oeq double %0, 123.0
  ret i1 %1
}

define i1 @cmpd_une() #0 {
entry:
; CHECK-LABEL: cmpd_une:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp une double %0, 123.0
  ret i1 %1
}

define i1 @cmpd_oge() #0 {
entry:
; CHECK-LABEL: cmpd_oge:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oge double %0, 123.0
  ret i1 %1
}

define i1 @cmpd_olt() #0 {
entry:
; CHECK-LABEL: cmpd_olt:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp olt double %0, 123.0
  ret i1 %1
}

define i1 @cmpd_ole() #0 {
entry:
; CHECK-LABEL: cmpd_ole:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ole double %0, 123.0
  ret i1 %1
}

define i1 @cmpd_ogt() #0 {
entry:
; CHECK-LABEL: cmpd_ogt:
; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ogt double %0, 123.0
  ret i1 %1
}

define i1 @cmpf_oeq() #0 {
entry:
; CHECK-LABEL: cmpf_oeq:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oeq float %0, 123.0
  ret i1 %1
}

define i1 @cmpf_une() #0 {
entry:
; CHECK-LABEL: cmpf_une:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp une float %0, 123.0
  ret i1 %1
}

define i1 @cmpf_oge() #0 {
entry:
; CHECK-LABEL: cmpf_oge:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oge float %0, 123.0
  ret i1 %1
}

define i1 @cmpf_olt() #0 {
entry:
; CHECK-LABEL: cmpf_olt:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp olt float %0, 123.0
  ret i1 %1
}

define i1 @cmpf_ole() #0 {
entry:
; CHECK-LABEL: cmpf_ole:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ole float %0, 123.0
  ret i1 %1
}

define i1 @cmpf_ogt() #0 {
entry:
; CHECK-LABEL: cmpf_ogt:
; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ogt float %0, 123.0
  ret i1 %1
}
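; Floating-point arithmetic has no hardware support on MSP430, so each
; operation becomes a call to the corresponding MSPABI helper.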
define double @addd() #0 {
entry:
; CHECK-LABEL: addd:
; CHECK: call #__mspabi_addd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fadd double %0, 123.0
  ret double %1
}

define float @addf() #0 {
entry:
; CHECK-LABEL: addf:
; CHECK: call #__mspabi_addf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fadd float %0, 123.0
  ret float %1
}

define double @divd() #0 {
entry:
; CHECK-LABEL: divd:
; CHECK: call #__mspabi_divd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fdiv double %0, 123.0
  ret double %1
}

define float @divf() #0 {
entry:
; CHECK-LABEL: divf:
; CHECK: call #__mspabi_divf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fdiv float %0, 123.0
  ret float %1
}

define double @mpyd() #0 {
entry:
; CHECK-LABEL: mpyd:
; CHECK: call #__mspabi_mpyd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fmul double %0, 123.0
  ret double %1
}

define float @mpyf() #0 {
entry:
; CHECK-LABEL: mpyf:
; CHECK: call #__mspabi_mpyf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fmul float %0, 123.0
  ret float %1
}

define double @subd() #0 {
entry:
; CHECK-LABEL: subd:
; CHECK: call #__mspabi_subd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fsub double %0, %0
  ret double %1
}

define float @subf() #0 {
entry:
; CHECK-LABEL: subf:
; CHECK: call #__mspabi_subf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fsub float %0, %0
  ret float %1
}
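; Integer division and remainder: the helper suffix encodes the operand
; width and signedness (i/li/lli signed 16/32/64-bit, u/ul/ull unsigned).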
define i16 @divi() #0 {
entry:
; CHECK-LABEL: divi:
; CHECK: call #__mspabi_divi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = sdiv i16 %0, %1
  ret i16 %2
}

define i32 @divli() #0 {
entry:
; CHECK-LABEL: divli:
; CHECK: call #__mspabi_divli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = sdiv i32 %0, %1
  ret i32 %2
}

define i64 @divlli() #0 {
entry:
; CHECK-LABEL: divlli:
; CHECK: call #__mspabi_divlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = sdiv i64 %0, %1
  ret i64 %2
}

define i16 @divu() #0 {
entry:
; CHECK-LABEL: divu:
; CHECK: call #__mspabi_divu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = udiv i16 %0, %1
  ret i16 %2
}

define i32 @divul() #0 {
entry:
; CHECK-LABEL: divul:
; CHECK: call #__mspabi_divul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = udiv i32 %0, %1
  ret i32 %2
}

define i64 @divull() #0 {
entry:
; CHECK-LABEL: divull:
; CHECK: call #__mspabi_divull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = udiv i64 %0, %1
  ret i64 %2
}

define i16 @remi() #0 {
entry:
; CHECK-LABEL: remi:
; CHECK: call #__mspabi_remi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = srem i16 %0, %1
  ret i16 %2
}

define i32 @remli() #0 {
entry:
; CHECK-LABEL: remli:
; CHECK: call #__mspabi_remli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = srem i32 %0, %1
  ret i32 %2
}

define i64 @remlli() #0 {
entry:
; CHECK-LABEL: remlli:
; CHECK: call #__mspabi_remlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = srem i64 %0, %1
  ret i64 %2
}

define i16 @remu() #0 {
entry:
; CHECK-LABEL: remu:
; CHECK: call #__mspabi_remu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = urem i16 %0, %1
  ret i16 %2
}

define i32 @remul() #0 {
entry:
; CHECK-LABEL: remul:
; CHECK: call #__mspabi_remul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = urem i32 %0, %1
  ret i32 %2
}

define i64 @remull() #0 {
entry:
; CHECK-LABEL: remull:
; CHECK: call #__mspabi_remull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = urem i64 %0, %1
  ret i64 %2
}
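; Multiplication helpers; each test multiplies a volatile load by itself
; so the operation cannot be constant-folded away.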
define i16 @mpyi() #0 {
entry:
; CHECK-LABEL: mpyi:
; CHECK: call #__mspabi_mpyi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = mul i16 %0, %0
  ret i16 %1
}

define i32 @mpyli() #0 {
entry:
; CHECK-LABEL: mpyli:
; CHECK: call #__mspabi_mpyl
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = mul i32 %0, %0
  ret i32 %1
}

define i64 @mpylli() #0 {
entry:
; CHECK-LABEL: mpylli:
; CHECK: call #__mspabi_mpyll
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = mul i64 %0, %0
  ret i64 %1
}
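; 32-bit shifts by a non-constant amount also go through libcalls;
; @i supplies the variable shift count.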
@i = external global i32, align 2
define i32 @srll() #0 {
entry:
; CHECK-LABEL: srll:
; CHECK: call #__mspabi_srll
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shr = lshr i32 %0, %1
  ret i32 %shr
}

define i32 @sral() #0 {
entry:
; CHECK-LABEL: sral:
; CHECK: call #__mspabi_sral
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shr = ashr i32 %0, %1
  ret i32 %shr
}

define i32 @slll() #0 {
entry:
; CHECK-LABEL: slll:
; CHECK: call #__mspabi_slll
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shl = shl i32 %0, %1
  ret i32 %shl
}
attributes #0 = { nounwind }