1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
3 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
; i24_or — OR the constant 0x180 (384) into an i24 stored at %a.
; i24 is not a legal machine type, so on both targets the value is
; reassembled from a 16-bit word at offset 0 plus a byte at offset 2;
; since OR-ing 0x180 only touches the low word, only the word is stored back.
; NOTE(review): several autogenerated CHECK lines and the IR `or` that
; defines %b fall on lines elided from this excerpt — presumably
; `or i24 %aa, 384` per the `orl $384` checks; confirm against the full file.
5 define void @i24_or(ptr %a) {
8 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
9 ; X86-NEXT:    movzwl (%eax), %ecx
10 ; X86-NEXT:    movzbl 2(%eax), %edx
11 ; X86-NEXT:    shll $16, %edx
12 ; X86-NEXT:    orl %ecx, %edx
13 ; X86-NEXT:    orl $384, %edx # imm = 0x180
14 ; X86-NEXT:    movw %dx, (%eax)
19 ; X64-NEXT:    movzwl (%rdi), %eax
20 ; X64-NEXT:    movzbl 2(%rdi), %ecx
21 ; X64-NEXT:    shll $16, %ecx
22 ; X64-NEXT:    orl %eax, %ecx
23 ; X64-NEXT:    orl $384, %ecx # imm = 0x180
24 ; X64-NEXT:    movw %cx, (%rdi)
26   %aa = load i24, ptr %a, align 1
28   store i24 %b, ptr %a, align 1
; i24_and_or — combined OR-then-AND on an i24 in memory: the generated
; code ORs in 0x180 and then ANDs with -128 (clears the low 7 bits)
; before storing the low word back.
; NOTE(review): the IR lines defining %c/%d (the `or`/`and` between the
; load and the store) are elided from this excerpt; the CHECK lines above
; the store show the expected constants.
32 define void @i24_and_or(ptr %a) {
33 ; X86-LABEL: i24_and_or:
35 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
36 ; X86-NEXT:    movzwl (%eax), %ecx
37 ; X86-NEXT:    movzbl 2(%eax), %edx
38 ; X86-NEXT:    shll $16, %edx
39 ; X86-NEXT:    orl %ecx, %edx
40 ; X86-NEXT:    orl $384, %edx # imm = 0x180
41 ; X86-NEXT:    andl $-128, %edx
42 ; X86-NEXT:    movw %dx, (%eax)
45 ; X64-LABEL: i24_and_or:
47 ; X64-NEXT:    movzwl (%rdi), %eax
48 ; X64-NEXT:    movzbl 2(%rdi), %ecx
49 ; X64-NEXT:    shll $16, %ecx
50 ; X64-NEXT:    orl %eax, %ecx
51 ; X64-NEXT:    orl $384, %ecx # imm = 0x180
52 ; X64-NEXT:    andl $-128, %ecx
53 ; X64-NEXT:    movw %cx, (%rdi)
55   %b = load i24, ptr %a, align 1
58   store i24 %d, ptr %a, align 1
; i24_insert_bit — insert a single i1 into bit position 13 of an i24:
; clear bit 13 of the loaded value (mask -8193 == 0xFFDFFF within 24 bits),
; then OR in the zero-extended bit shifted left by 13.
; The i686 run needs an extra register, hence the %esi push/pop and the
; accompanying CFI directives; the x86_64 run receives %bit in %esi
; directly per the SysV calling convention.
62 define void @i24_insert_bit(ptr %a, i1 zeroext %bit) {
63 ; X86-LABEL: i24_insert_bit:
65 ; X86-NEXT:    pushl %esi
66 ; X86-NEXT:    .cfi_def_cfa_offset 8
67 ; X86-NEXT:    .cfi_offset %esi, -8
68 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
69 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
70 ; X86-NEXT:    movzwl (%eax), %edx
71 ; X86-NEXT:    movzbl 2(%eax), %esi
72 ; X86-NEXT:    shll $16, %esi
73 ; X86-NEXT:    orl %edx, %esi
74 ; X86-NEXT:    shll $13, %ecx
75 ; X86-NEXT:    andl $16769023, %esi # imm = 0xFFDFFF
76 ; X86-NEXT:    orl %ecx, %esi
77 ; X86-NEXT:    movw %si, (%eax)
79 ; X86-NEXT:    .cfi_def_cfa_offset 4
82 ; X64-LABEL: i24_insert_bit:
84 ; X64-NEXT:    movzwl (%rdi), %eax
85 ; X64-NEXT:    movzbl 2(%rdi), %ecx
86 ; X64-NEXT:    shll $16, %ecx
87 ; X64-NEXT:    orl %eax, %ecx
88 ; X64-NEXT:    shll $13, %esi
89 ; X64-NEXT:    andl $16769023, %ecx # imm = 0xFFDFFF
90 ; X64-NEXT:    orl %esi, %ecx
91 ; X64-NEXT:    movw %cx, (%rdi)
93   %extbit = zext i1 %bit to i24
94   %b = load i24, ptr %a, align 1
95   %extbit.shl = shl nuw nsw i24 %extbit, 13
96   %c = and i24 %b, -8193
97   %d = or i24 %c, %extbit.shl
98   store i24 %d, ptr %a, align 1
; i56_or — OR 0x180 into an i56 at %a. Because the OR only affects the low
; 32 bits, both runs narrow the whole 56-bit read-modify-write down to a
; single 32-bit `orl` directly against memory.
; NOTE(review): the label/prologue CHECK lines and the IR `or` defining %b
; are on lines elided from this excerpt.
102 define void @i56_or(ptr %a) {
105 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
106 ; X86-NEXT:    orl $384, (%eax) # imm = 0x180
111 ; X64-NEXT:    orl $384, (%rdi) # imm = 0x180
113   %aa = load i56, ptr %a, align 1
115   store i56 %b, ptr %a, align 1
; i56_and_or — OR 0x180 then AND -128 on an i56 in memory.
; The i686 run narrows the operation to the low 32 bits (the mask only
; clears low bits), while the x86_64 run still materializes the full
; 56-bit value: dword at 0, word at 4, byte at 6 are combined into one
; 64-bit register, masked with the 56-bit-wide constant
; 0xFFFFFFFFFFFF80, and split back into a dword + word store.
; NOTE(review): the IR `or` defining %d (between %c and the store) is on a
; line elided from this excerpt.
119 define void @i56_and_or(ptr %a) {
120 ; X86-LABEL: i56_and_or:
122 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
123 ; X86-NEXT:    movl $384, %ecx # imm = 0x180
124 ; X86-NEXT:    orl (%eax), %ecx
125 ; X86-NEXT:    andl $-128, %ecx
126 ; X86-NEXT:    movl %ecx, (%eax)
129 ; X64-LABEL: i56_and_or:
131 ; X64-NEXT:    movzwl 4(%rdi), %eax
132 ; X64-NEXT:    movzbl 6(%rdi), %ecx
133 ; X64-NEXT:    shll $16, %ecx
134 ; X64-NEXT:    orl %eax, %ecx
135 ; X64-NEXT:    shlq $32, %rcx
136 ; X64-NEXT:    movl (%rdi), %eax
137 ; X64-NEXT:    orq %rcx, %rax
138 ; X64-NEXT:    orq $384, %rax # imm = 0x180
139 ; X64-NEXT:    movabsq $72057594037927808, %rcx # imm = 0xFFFFFFFFFFFF80
140 ; X64-NEXT:    andq %rax, %rcx
141 ; X64-NEXT:    movl %ecx, (%rdi)
142 ; X64-NEXT:    shrq $32, %rcx
143 ; X64-NEXT:    movw %cx, 4(%rdi)
145   %b = load i56, ptr %a, align 1
146   %c = and i56 %b, -128
148   store i56 %d, ptr %a, align 1
; i56_insert_bit — insert a single i1 at bit 13 of an i56: clear bit 13
; (AND with -8193) and OR in the zero-extended bit shifted left by 13.
; The i686 run narrows the whole operation to one 32-bit dword (bit 13
; lives in the low 32 bits), while the x86_64 run reassembles the full
; 56-bit value (dword + word + byte), masks it, and stores it back as a
; word at offset 4 plus a dword at offset 0.
; NOTE(review): the function's trailing `ret`/`}` lines are elided from
; this excerpt; only the visible lines are documented here.
152 define void @i56_insert_bit(ptr %a, i1 zeroext %bit) {
153 ; X86-LABEL: i56_insert_bit:
155 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
156 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
157 ; X86-NEXT:    shll $13, %ecx
158 ; X86-NEXT:    movl $-8193, %edx # imm = 0xDFFF
159 ; X86-NEXT:    andl (%eax), %edx
160 ; X86-NEXT:    orl %ecx, %edx
161 ; X86-NEXT:    movl %edx, (%eax)
164 ; X64-LABEL: i56_insert_bit:
166 ; X64-NEXT:    movzwl 4(%rdi), %eax
167 ; X64-NEXT:    movzbl 6(%rdi), %ecx
168 ; X64-NEXT:    shll $16, %ecx
169 ; X64-NEXT:    orl %eax, %ecx
170 ; X64-NEXT:    shlq $32, %rcx
171 ; X64-NEXT:    movl (%rdi), %eax
172 ; X64-NEXT:    orq %rcx, %rax
173 ; X64-NEXT:    shll $13, %esi
174 ; X64-NEXT:    andq $-8193, %rax # imm = 0xDFFF
175 ; X64-NEXT:    orl %eax, %esi
176 ; X64-NEXT:    shrq $32, %rax
177 ; X64-NEXT:    movw %ax, 4(%rdi)
178 ; X64-NEXT:    movl %esi, (%rdi)
180   %extbit = zext i1 %bit to i56
181   %b = load i56, ptr %a, align 1
182   %extbit.shl = shl nuw nsw i56 %extbit, 13
183   %c = and i56 %b, -8193
184   %d = or i56 %c, %extbit.shl
185   store i56 %d, ptr %a, align 1