1 ; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
2 target triple = "thumbv7-apple-ios"
6 ; The load must go into d0-d15, which are all clobbered by the asm.
7 ; RAGreedy should split the range and use d16-d31 to avoid a spill.
15 define void @local_split(float* nocapture %p) nounwind ssp {
17 %x = load float, float* %p, align 4
18 %a = fadd float %x, 1.0
19 tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
20 store float %a, float* %p, align 4
26 ; Same thing, but across basic blocks.
34 define void @global_split(float* nocapture %p1, float* nocapture %p2) nounwind ssp {
36 %0 = load float, float* %p1, align 4
37 %add = fadd float %0, 1.000000e+00
38 tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
39 %cmp = fcmp ogt float %add, 0.000000e+00
40 br i1 %cmp, label %if.then, label %if.end
43 store float %add, float* %p2, align 4
47 store float %add, float* %p1, align 4