; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefix=X64

define i64 @PR69965(ptr %input_ptrs, ptr %output_ptrs) {
; X86-LABEL: PR69965:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl (%eax), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    notl %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shll $8, %edx
; X86-NEXT:    movl (%ecx), %ecx
; X86-NEXT:    addb %al, %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    orl %edx, %eax
; X86-NEXT:    orl $32768, %eax # imm = 0x8000
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: PR69965:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %rax
; X64-NEXT:    movzbl (%rax), %eax
; X64-NEXT:    notl %eax
; X64-NEXT:    leal (%rax,%rax), %ecx
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    shll $8, %eax
; X64-NEXT:    movq (%rsi), %rdx
; X64-NEXT:    movzbl %cl, %ecx
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    orl $32768, %ecx # imm = 0x8000
; X64-NEXT:    movw %cx, (%rdx)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    retq
entry:
  %0 = load ptr, ptr %input_ptrs, align 8
  %.val.i = load i8, ptr %0, align 1
  %1 = and i8 %.val.i, 127
  %2 = xor i8 %1, 127
  %3 = or i8 %2, -128
  %4 = zext i8 %3 to i16
  %5 = load ptr, ptr %output_ptrs, align 8
  %6 = shl nuw i16 %4, 8
  %7 = shl i8 %2, 1
  %8 = zext i8 %7 to i16
  %9 = or i16 %6, %8
  store i16 %9, ptr %5, align 2
  ret i64 0
}