; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
; Note: Lots of tests use inline asm instead of regular calls.
; This gives better control over what register allocation will do.
; Otherwise, we may have spills right in the entry block, defeating
; shrink-wrapping. Moreover, some of the inline asm statements (nop)
; are here to ensure that the related paths do not end up as critical edges.
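; (For example, several blocks below contain a 'tail call void asm "nop", ""()';
;  this is the kind of inline asm statement the note above refers to.)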

; Initial motivating example: Simple diamond with a call just on one side.

; Compare the arguments and return.
; ENABLE: cmpw 0, 3, 4
; ENABLE-NEXT: bgelr 0

; At a minimum, we save/restore the link register. Other registers may be saved
; as well.
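; (On PPC64 the link-register save typically shows up as an mflr followed by a
;  store of the link register to the stack, but the exact prologue sequence is
;  not checked here.)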
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: cmpw 0, 3, 4
; DISABLE-NEXT: bge 0, .[[EXIT_LABEL:LBB[0-9_]+]]
; Store %a on the stack
; CHECK: stw 3, {{[0-9]+([0-9]+)}}
; Set the alloca address in the second argument.
; CHECK-NEXT: addi 4, 1, {{[0-9]+}}
; Set the first argument to zero.
; CHECK-NEXT: bl doSomething
; With shrink-wrapping, the epilogue is just after the call.
; Restore the link register and return.
; Note that there could be other epilogue code before the link register is
; restored, but we will not check for it here.
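; (With shrink-wrapping enabled, that restore may therefore appear right after
;  the "bl doSomething" above rather than in a dedicated exit block.)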
; DISABLE: [[EXIT_LABEL]]:
; Without shrink-wrapping, the epilogue is in the exit block.
; Epilogue code. (What we pop does not matter.)
; DISABLE: mtlr {{[0-9]+}}

define i32 @foo(i32 %a, i32 %b) {
  %tmp = alloca i32, align 4
  %tmp2 = icmp slt i32 %a, %b
  br i1 %tmp2, label %true, label %false
  store i32 %a, i32* %tmp, align 4
  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
  %tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]

; Function Attrs: optsize
declare i32 @doSomething(i32, i32*)

; Check that we do not perform the restore inside the loop whereas the save
; is outside.
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop:
; Shrink-wrapping allows us to skip the prologue in the else case.
; ENABLE: cmplwi 0, 3, 0
; ENABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
; DISABLE: cmplwi 0, 3, 0
; DISABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
; CHECK-DAG: addi [[IV]], [[IV]], -1
; CHECK-DAG: add [[SUM]], 3, [[SUM]]
; CHECK-NEXT: cmplwi [[IV]], 0
; CHECK-NEXT: bne 0, .[[LOOP]]
; CHECK: slwi 3, [[SUM]], 3
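; The slwi above corresponds to the "%shl = shl i32 %add, 3" in the IR, i.e. the
; accumulated sum multiplied by 8.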
; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
; DISABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; DISABLE: slwi 3, 4, 1
; DISABLE: .[[EPILOG_BB]]: # %if.end
; CHECK: mtlr {{[0-9]+}}
; ENABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; ENABLE: slwi 3, 4, 1

define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body:                                         ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %shl = shl i32 %add, 3

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

declare i32 @something(...)

; Check that we do not perform the shrink-wrapping inside the loop even
; though that would be legal. The cost model must prevent that.
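; (Saving and restoring inside the loop would execute the prologue/epilogue code
;  on every iteration; block frequency information should keep it out of the loop.)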
; CHECK-LABEL: freqSaveAndRestoreOutsideLoop2:
; Make sure we save the link register before the call
; CHECK: mflr {{[0-9]+}}
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
; CHECK: bl something
; CHECK-DAG: addi [[IV]], [[IV]], -1
; CHECK-DAG: add [[SUM]], 3, [[SUM]]
; CHECK-NEXT: cmplwi [[IV]], 0
; CHECK-NEXT: bne 0, .[[LOOP]]
; CHECK: mtlr {{[0-9]+}}

define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
  br label %for.preheader
  tail call void asm "nop", ""()

for.body:                                         ; preds = %for.body, %entry
  %i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
  %sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
  %add = add nsw i32 %call, %sum.03
  %inc = add nuw nsw i32 %i.04, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body
  tail call void asm "nop", ""()

for.end:                                          ; preds = %for.body

; Check with a more complex case that we do not have save within the loop and
; restore outside.
; CHECK-LABEL: loopInfoSaveOutsideLoop:
; ENABLE: cmplwi 0, 3, 0
; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
; DISABLE: cmplwi 0, 3, 0
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
; CHECK: bl something
; CHECK-DAG: addi [[IV]], [[IV]], -1
; CHECK-DAG: add [[SUM]], 3, [[SUM]]
; CHECK-NEXT: cmplwi [[IV]], 0
; CHECK-NEXT: bne 0, .[[LOOP]]
; CHECK: bl somethingElse
; CHECK: slwi 3, [[SUM]], 3
; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
; DISABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; DISABLE: slwi 3, 4, 1
; DISABLE: .[[EPILOG_BB]]: # %if.end
; CHECK: mtlr {{[0-9]+}}
; ENABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; ENABLE: slwi 3, 4, 1

define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body:                                         ; preds = %entry, %for.body
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  %sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  tail call void bitcast (void (...)* @somethingElse to void ()*)()
  %shl = shl i32 %add, 3

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

declare void @somethingElse(...)

; Check with a more complex case that we do not have restore within the loop and
; save outside.
; CHECK-LABEL: loopInfoRestoreOutsideLoop:
; ENABLE: cmplwi 0, 3, 0
; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
; DISABLE: cmplwi 0, 3, 0
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; CHECK: bl somethingElse
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
; CHECK: bl something
; CHECK-DAG: addi [[IV]], [[IV]], -1
; CHECK-DAG: add [[SUM]], 3, [[SUM]]
; CHECK-NEXT: cmplwi [[IV]], 0
; CHECK-NEXT: bne 0, .[[LOOP]]
; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
; DISABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; DISABLE: slwi 3, 4, 1
; DISABLE: .[[EPILOG_BB]]: # %if.end
; CHECK: mtlr {{[0-9]+}}
; ENABLE: .[[ELSE_LABEL]]: # %if.else
; Shift second argument by one and store into returned register.
; ENABLE: slwi 3, 4, 1

define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  tail call void bitcast (void (...)* @somethingElse to void ()*)()

for.body:                                         ; preds = %for.body, %if.then
  %i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
  %sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
  %add = add nsw i32 %call, %sum.04
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  %shl = shl i32 %add, 3

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end:                                           ; preds = %if.else, %for.end
  %sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]

; Check that we handle functions with no frame information correctly.
; CHECK-LABEL: emptyFrame:
; CHECK-NEXT: li 3, 0

define i32 @emptyFrame() {

; Check that we handle inline asm correctly.
; CHECK-LABEL: inlineAsm:
; ENABLE: cmplwi 0, 3, 0
; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; Make sure we save the CSR used in the inline asm: r14
; ENABLE-DAG: li [[IV:[0-9]+]], 10
; ENABLE-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
; DISABLE: cmplwi 0, 3, 0
; DISABLE-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; DISABLE: li [[IV:[0-9]+]], 10
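; r14 is clobbered by the "addi 14, 14, 1" inline asm below (note the "~{r14}"
; constraint in the IR), so it must be spilled before the loop and reloaded after.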
; CHECK: mtctr [[IV]]
; CHECK: .[[LOOP_LABEL:LBB[0-9_]+]]: # %for.body
; Inline asm statement.
; CHECK: addi 14, 14, 1
; CHECK: bdnz .[[LOOP_LABEL]]
; CHECK-DAG: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
; CHECK: [[ELSE_LABEL]]
; CHECK-NEXT: slwi 3, 4, 1
; DISABLE: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload

define i32 @inlineAsm(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.preheader
  tail call void asm "nop", ""()

for.body:                                         ; preds = %entry, %for.body
  %i.03 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
  tail call void asm "addi 14, 14, 1", "~{r14}"()
  %inc = add nuw nsw i32 %i.03, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.exit, label %for.body
  tail call void asm "nop", ""()

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end:                                           ; preds = %for.body, %if.else
  %sum.0 = phi i32 [ %mul, %if.else ], [ 0, %for.exit ]

; Check that we handle calls to variadic functions correctly.
; CHECK-LABEL: callVariadicFunc:
; ENABLE: cmplwi 0, 3, 0
; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; CHECK: mflr {{[0-9]+}}
; DISABLE: cmplwi 0, 3, 0
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; Setup of the varargs.
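; All seven i32 arguments fit in GPRs (r3-r9 under the ELFv2 ABI), hence the
; chain of mr instructions below that copies the value into each argument register.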
; CHECK-NEXT: mr 5, 3
; CHECK-NEXT: mr 6, 3
; CHECK-NEXT: mr 7, 3
; CHECK-NEXT: mr 8, 3
; CHECK-NEXT: mr 9, 3
; CHECK-NEXT: bl someVariadicFunc
; CHECK: slwi 3, 3, 3
; DISABLE: b .[[EPILOGUE_BB:LBB[0-9_]+]]
; ENABLE: mtlr {{[0-9]+}}
; CHECK: .[[ELSE_LABEL]]: # %if.else
; CHECK-NEXT: slwi 3, 4, 1
; DISABLE: .[[EPILOGUE_BB]]: # %if.end

define i32 @callVariadicFunc(i32 %cond, i32 %N) {
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i32 (i32, ...) @someVariadicFunc(i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N)
  %shl = shl i32 %call, 3

if.else:                                          ; preds = %entry
  %mul = shl nsw i32 %N, 1

if.end:                                           ; preds = %if.else, %if.then
  %sum.0 = phi i32 [ %shl, %if.then ], [ %mul, %if.else ]

declare i32 @someVariadicFunc(i32, ...)

; Make sure we do not insert unreachable code after a call to a noreturn
; function. Although it is not incorrect to insert such code, it is useless
; and it hurts the binary size.
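; With shrink-wrapping, the prologue is expected only on the abort path, and no
; epilogue is needed there because the call to abort never returns (see the
; ENABLE-NOT check on mtlr below).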
; CHECK-LABEL: noreturn:
; DISABLE: mflr {{[0-9]+}}
; CHECK: cmplwi 0, 3, 0
; CHECK-NEXT: bne{{[-]?}} 0, .[[ABORT:LBB[0-9_]+]]
; DISABLE: mtlr {{[0-9]+}}
; CHECK: .[[ABORT]]: # %if.abort
; ENABLE: mflr {{[0-9]+}}
; ENABLE-NOT: mtlr {{[0-9]+}}
define i32 @noreturn(i8 signext %bad_thing) {
  %tobool = icmp eq i8 %bad_thing, 0
  br i1 %tobool, label %if.end, label %if.abort
  tail call void @abort() #0

declare void @abort() #0

attributes #0 = { noreturn nounwind }

; Make sure that we handle infinite loops properly. When checking that the Save
; and Restore blocks are control flow equivalent, the pass searches for the
; immediate (post) dominator of the (restore) save blocks. When either the Save
; or Restore block is located in an infinite loop, the only immediate (post)
; dominator is the block itself. In this case, we cannot perform shrink-wrapping,
; but we should return gracefully and continue compilation.
; The only requirement for this test is that the compilation finishes correctly.
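; (Hence only a CHECK-LABEL below: shrink-wrapping is expected to bail out
;  gracefully and no particular code shape is verified.)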
; CHECK-LABEL: infiniteloop
define void @infiniteloop() {
  br i1 undef, label %if.then, label %if.end
  %ptr = alloca i32, i32 4

for.body:                                         ; preds = %for.body, %entry
  %sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
  %add = add nsw i32 %call, %sum.03
  store i32 %add, i32* %ptr

; Another infinite loop test, this time with a body bigger than just one block.
; CHECK-LABEL: infiniteloop2
define void @infiniteloop2() {
  br i1 undef, label %if.then, label %if.end
  %ptr = alloca i32, i32 4

for.body:                                         ; preds = %for.body, %entry
  %sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2 ]
  %call = tail call i32 asm "mftb $0, 268", "=r,~{r14}"()
  %add = add nsw i32 %call, %sum.03
  store i32 %add, i32* %ptr
  br i1 undef, label %body1, label %body2
  tail call void asm sideeffect "nop", "~{r14}"()
  tail call void asm sideeffect "nop", "~{r14}"()

; Another infinite loop test, this time with two nested infinite loops.
; CHECK-LABEL: infiniteloop3
; CHECK: Lfunc_begin[[FUNCNUM:[0-9]+]]
; CHECK: Lfunc_end[[FUNCNUM]]
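; As for the other infinite loop tests, we only require that the function is
; emitted (between its Lfunc_begin/Lfunc_end labels); no code shape is checked.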
define void @infiniteloop3() {
  br i1 undef, label %loop2a, label %body

body:                                             ; preds = %entry
  br i1 undef, label %loop2a, label %end

loop1:                                            ; preds = %loop2a, %loop2b
  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
  %0 = icmp eq i32* %var, null
  %next.load = load i32*, i32** undef
  br i1 %0, label %loop2a, label %loop2b

loop2a:                                           ; preds = %loop1, %body, %entry
  %var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]

loop2b:                                           ; preds = %loop1
  %gep1 = bitcast i32* %var.phi to i32*
  %next.ptr = bitcast i32* %gep1 to i32**
  store i32* %next.phi, i32** %next.ptr

@columns = external global [0 x i32], align 4
@lock = common global i32 0, align 4
@htindex = common global i32 0, align 4
@stride = common global i32 0, align 4
@ht = common global i32* null, align 8
@he = common global i8* null, align 8

; Test for a bug that was triggered when the save point was equal to the restore
; point.
; Function Attrs: nounwind
; CHECK-LABEL: transpose
; Store of the callee-save register saved by shrink-wrapping.
; FIXME: Test disabled: improved scheduling no longer needs any spills/reloads!
; CHECKXX: std [[CSR:[0-9]+]], -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
; Reload of the callee-save register.
; CHECKXX: ld [[CSR]], -[[STACK_OFFSET]](1) # 8-byte Folded Reload
; Ensure there are no subsequent uses of the callee-save register before the end
; of the function.
; CHECK-NOT: {{[a-z]+}} [[CSR]]
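; (The CHECKXX spelling above is deliberately not a FileCheck prefix for this
;  test, so those spill/reload checks stay inert until the FIXME is addressed.)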

define signext i32 @transpose() {
  %0 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 1), align 4
  %shl.i = shl i32 %0, 7
  %1 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 2), align 4
  %or.i = or i32 %shl.i, %1
  %shl1.i = shl i32 %or.i, 7
  %2 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 3), align 4
  %or2.i = or i32 %shl1.i, %2
  %3 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 7), align 4
  %shl3.i = shl i32 %3, 7
  %4 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 6), align 4
  %or4.i = or i32 %shl3.i, %4
  %shl5.i = shl i32 %or4.i, 7
  %5 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 5), align 4
  %or6.i = or i32 %shl5.i, %5
  %cmp.i = icmp ugt i32 %or2.i, %or6.i
  br i1 %cmp.i, label %cond.true.i, label %cond.false.i

  %shl7.i = shl i32 %or2.i, 7
  %6 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
  %or8.i = or i32 %6, %shl7.i
  %conv.i = zext i32 %or8.i to i64
  %shl9.i = shl nuw nsw i64 %conv.i, 21
  %conv10.i = zext i32 %or6.i to i64
  %or11.i = or i64 %shl9.i, %conv10.i

  %shl12.i = shl i32 %or6.i, 7
  %7 = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @columns, i64 0, i64 4), align 4
  %or13.i = or i32 %7, %shl12.i
  %conv14.i = zext i32 %or13.i to i64
  %shl15.i = shl nuw nsw i64 %conv14.i, 21
  %conv16.i = zext i32 %or2.i to i64
  %or17.i = or i64 %shl15.i, %conv16.i

  %cond.i = phi i64 [ %or11.i, %cond.true.i ], [ %or17.i, %cond.false.i ]
  %shr.29.i = lshr i64 %cond.i, 17
  %conv18.i = trunc i64 %shr.29.i to i32
  store i32 %conv18.i, i32* @lock, align 4
  %rem.i = srem i64 %cond.i, 1050011
  %conv19.i = trunc i64 %rem.i to i32
  store i32 %conv19.i, i32* @htindex, align 4
  %rem20.i = urem i32 %conv18.i, 179
  %add.i = or i32 %rem20.i, 131072
  store i32 %add.i, i32* @stride, align 4
  %8 = load i32*, i32** @ht, align 8
  %arrayidx = getelementptr inbounds i32, i32* %8, i64 %rem.i
  %9 = load i32, i32* %arrayidx, align 4
  %cmp1 = icmp eq i32 %9, %conv18.i
  br i1 %cmp1, label %if.then, label %if.end

  %idxprom.lcssa = phi i64 [ %rem.i, %hash.exit ], [ %idxprom.1, %if.end ], [ %idxprom.2, %if.end.1 ], [ %idxprom.3, %if.end.2 ], [ %idxprom.4, %if.end.3 ], [ %idxprom.5, %if.end.4 ], [ %idxprom.6, %if.end.5 ], [ %idxprom.7, %if.end.6 ]
  %10 = load i8*, i8** @he, align 8
  %arrayidx3 = getelementptr inbounds i8, i8* %10, i64 %idxprom.lcssa
  %11 = load i8, i8* %arrayidx3, align 1
  %conv = sext i8 %11 to i32

  %add = add nsw i32 %add.i, %conv19.i
  %cmp4 = icmp sgt i32 %add, 1050010
  %sub = add nsw i32 %add, -1050011
  %sub.add = select i1 %cmp4, i32 %sub, i32 %add
  %idxprom.1 = sext i32 %sub.add to i64
  %arrayidx.1 = getelementptr inbounds i32, i32* %8, i64 %idxprom.1
  %12 = load i32, i32* %arrayidx.1, align 4
  %cmp1.1 = icmp eq i32 %12, %conv18.i
  br i1 %cmp1.1, label %if.then, label %if.end.1

  %retval.0 = phi i32 [ %conv, %if.then ], [ -128, %if.end.6 ]

  %add.1 = add nsw i32 %add.i, %sub.add
  %cmp4.1 = icmp sgt i32 %add.1, 1050010
  %sub.1 = add nsw i32 %add.1, -1050011
  %sub.add.1 = select i1 %cmp4.1, i32 %sub.1, i32 %add.1
  %idxprom.2 = sext i32 %sub.add.1 to i64
  %arrayidx.2 = getelementptr inbounds i32, i32* %8, i64 %idxprom.2
  %13 = load i32, i32* %arrayidx.2, align 4
  %cmp1.2 = icmp eq i32 %13, %conv18.i
  br i1 %cmp1.2, label %if.then, label %if.end.2

  %add.2 = add nsw i32 %add.i, %sub.add.1
  %cmp4.2 = icmp sgt i32 %add.2, 1050010
  %sub.2 = add nsw i32 %add.2, -1050011
  %sub.add.2 = select i1 %cmp4.2, i32 %sub.2, i32 %add.2
  %idxprom.3 = sext i32 %sub.add.2 to i64
  %arrayidx.3 = getelementptr inbounds i32, i32* %8, i64 %idxprom.3
  %14 = load i32, i32* %arrayidx.3, align 4
  %cmp1.3 = icmp eq i32 %14, %conv18.i
  br i1 %cmp1.3, label %if.then, label %if.end.3

  %add.3 = add nsw i32 %add.i, %sub.add.2
  %cmp4.3 = icmp sgt i32 %add.3, 1050010
  %sub.3 = add nsw i32 %add.3, -1050011
  %sub.add.3 = select i1 %cmp4.3, i32 %sub.3, i32 %add.3
  %idxprom.4 = sext i32 %sub.add.3 to i64
  %arrayidx.4 = getelementptr inbounds i32, i32* %8, i64 %idxprom.4
  %15 = load i32, i32* %arrayidx.4, align 4
  %cmp1.4 = icmp eq i32 %15, %conv18.i
  br i1 %cmp1.4, label %if.then, label %if.end.4

  %add.4 = add nsw i32 %add.i, %sub.add.3
  %cmp4.4 = icmp sgt i32 %add.4, 1050010
  %sub.4 = add nsw i32 %add.4, -1050011
  %sub.add.4 = select i1 %cmp4.4, i32 %sub.4, i32 %add.4
  %idxprom.5 = sext i32 %sub.add.4 to i64
  %arrayidx.5 = getelementptr inbounds i32, i32* %8, i64 %idxprom.5
  %16 = load i32, i32* %arrayidx.5, align 4
  %cmp1.5 = icmp eq i32 %16, %conv18.i
  br i1 %cmp1.5, label %if.then, label %if.end.5

  %add.5 = add nsw i32 %add.i, %sub.add.4
  %cmp4.5 = icmp sgt i32 %add.5, 1050010
  %sub.5 = add nsw i32 %add.5, -1050011
  %sub.add.5 = select i1 %cmp4.5, i32 %sub.5, i32 %add.5
  %idxprom.6 = sext i32 %sub.add.5 to i64
  %arrayidx.6 = getelementptr inbounds i32, i32* %8, i64 %idxprom.6
  %17 = load i32, i32* %arrayidx.6, align 4
  %cmp1.6 = icmp eq i32 %17, %conv18.i
  br i1 %cmp1.6, label %if.then, label %if.end.6

  %add.6 = add nsw i32 %add.i, %sub.add.5
  %cmp4.6 = icmp sgt i32 %add.6, 1050010
  %sub.6 = add nsw i32 %add.6, -1050011
  %sub.add.6 = select i1 %cmp4.6, i32 %sub.6, i32 %add.6
  %idxprom.7 = sext i32 %sub.add.6 to i64
  %arrayidx.7 = getelementptr inbounds i32, i32* %8, i64 %idxprom.7
  %18 = load i32, i32* %arrayidx.7, align 4
  %cmp1.7 = icmp eq i32 %18, %conv18.i
  br i1 %cmp1.7, label %if.then, label %cleanup