; RUN: llc < %s -mtriple=x86_64-- -mcpu=k8 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=opteron | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=athlon64 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=athlon-fx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=k8-sse3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=opteron-sse3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=athlon64-sse3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=amdfam10 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=btver1 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=btver2 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=bdver1 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=bdver2 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=bdver3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=bdver4 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver1 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver2 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s

; Verify that for the X86_64 processors that are known to have poor latency
; double precision shift instructions we do not generate 'shld' or 'shrd'
; instructions.
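
; For reference (illustrative only, not checked by this test): SHLD performs
; the double shift in a single instruction; e.g. "shldq %cl, %rsi, %rdi"
; (AT&T syntax) computes rdi = (rdi << cl) | (rsi >> (64 - cl)), which is the
; lshift pattern with the SysV arguments a in rdi, b in rsi. On the AMD
; processors listed above this instruction has poor latency, so expanding it
; into separate shift and or instructions is preferable.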

;uint64_t lshift(uint64_t a, uint64_t b, int c)
;{
;    return (a << c) | (b >> (64-c));
;}

define i64 @lshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
entry:
; CHECK-NOT: shld
  %sh_prom = zext i32 %c to i64
  %shl = shl i64 %a, %sh_prom
  %sub = sub nsw i32 64, %c
  %sh_prom1 = zext i32 %sub to i64
  %shr = lshr i64 %b, %sh_prom1
  %or = or i64 %shr, %shl
  ret i64 %or
}

;uint64_t rshift(uint64_t a, uint64_t b, int c)
;{
;    return (a >> c) | (b << (64-c));
;}
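
; Similarly (illustrative only, not checked): "shrdq %cl, %rsi, %rdi" computes
; rdi = (rdi >> cl) | (rsi << (64 - cl)), i.e. the rshift pattern below in a
; single double shift instruction.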

define i64 @rshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
entry:
; CHECK-NOT: shrd
  %sh_prom = zext i32 %c to i64
  %shr = lshr i64 %a, %sh_prom
  %sub = sub nsw i32 64, %c
  %sh_prom1 = zext i32 %sub to i64
  %shl = shl i64 %b, %sh_prom1
  %or = or i64 %shl, %shr
  ret i64 %or
}