; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
; OR an i24 in memory with a constant (384). The backend must legalize the
; illegal i24 load/store as a word+byte pair without widening the access.
define void @i24_or(i24* %a) {
; X86-LABEL: i24_or:
; X86:       # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: movzbl 2(%ecx), %eax
; X86-NEXT: movb %al, 2(%ecx)
; X86-NEXT: shll $16, %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: orl $384, %eax # imm = 0x180
; X86-NEXT: movw %ax, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: i24_or:
; X64:       # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: orl $384, %ecx # imm = 0x180
; X64-NEXT: movw %cx, (%rdi)
; X64-NEXT: retq
  %aa = load i24, i24* %a, align 1
  %b = or i24 %aa, 384
  store i24 %b, i24* %a, align 1
  ret void
}
; AND+OR an i24 in memory: %b & -128 (0xFFFF80) | 384. Checks the combined
; mask/insert sequence stays within a word+byte load/store pair.
define void @i24_and_or(i24* %a) {
; X86-LABEL: i24_and_or:
; X86:       # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: movzbl 2(%ecx), %eax
; X86-NEXT: movb %al, 2(%ecx)
; X86-NEXT: shll $16, %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: orl $384, %eax # imm = 0x180
; X86-NEXT: andl $16777088, %eax # imm = 0xFFFF80
; X86-NEXT: movw %ax, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: i24_and_or:
; X64:       # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: orl $384, %ecx # imm = 0x180
; X64-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
; X64-NEXT: movw %cx, (%rdi)
; X64-NEXT: retq
  %b = load i24, i24* %a, align 1
  %c = and i24 %b, -128
  %d = or i24 %c, 384
  store i24 %d, i24* %a, align 1
  ret void
}
; Insert a single bit (bit 13) of an i24 in memory from a zeroext i1 argument:
; clear the bit with mask -8193 (0xFFDFFF), then OR in the shifted bit.
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; X86-LABEL: i24_insert_bit:
; X86:       # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movzwl (%ecx), %esi
; X86-NEXT: movzbl 2(%ecx), %eax
; X86-NEXT: movb %al, 2(%ecx)
; X86-NEXT: shll $16, %eax
; X86-NEXT: orl %esi, %eax
; X86-NEXT: shll $13, %edx
; X86-NEXT: andl $16769023, %eax # imm = 0xFFDFFF
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movw %ax, (%ecx)
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-LABEL: i24_insert_bit:
; X64:       # %bb.0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzbl 2(%rdi), %ecx
; X64-NEXT: movb %cl, 2(%rdi)
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shll $13, %esi
; X64-NEXT: andl $16769023, %ecx # imm = 0xFFDFFF
; X64-NEXT: orl %esi, %ecx
; X64-NEXT: movw %cx, (%rdi)
; X64-NEXT: retq
  %extbit = zext i1 %bit to i24
  %b = load i24, i24* %a, align 1
  %extbit.shl = shl nuw nsw i24 %extbit, 13
  %c = and i24 %b, -8193
  %d = or i24 %c, %extbit.shl
  store i24 %d, i24* %a, align 1
  ret void
}
; OR an i56 in memory with 384. On x86 the low dword can be OR'd in place;
; on x86-64 the i56 is assembled from dword+word+byte pieces and split back.
define void @i56_or(i56* %a) {
; X86-LABEL: i56_or:
; X86:       # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $384, (%eax) # imm = 0x180
; X86-NEXT: retl
;
; X64-LABEL: i56_or:
; X64:       # %bb.0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: orq $384, %rax # imm = 0x180
; X64-NEXT: movl %eax, (%rdi)
; X64-NEXT: shrq $32, %rax
; X64-NEXT: movw %ax, 4(%rdi)
; X64-NEXT: retq
  %aa = load i56, i56* %a, align 1
  %b = or i56 %aa, 384
  store i56 %b, i56* %a, align 1
  ret void
}
; AND+OR an i56 in memory: (%b & -128) | 384. The -128 mask appears as
; 0xFFFFFFFFFFFF80 (56 bits) in the x86-64 sequence.
define void @i56_and_or(i56* %a) {
; X86-LABEL: i56_and_or:
; X86:       # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $384, %ecx # imm = 0x180
; X86-NEXT: orl (%eax), %ecx
; X86-NEXT: andl $-128, %ecx
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: i56_and_or:
; X64:       # %bb.0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
; X64-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: orq $384, %rax # imm = 0x180
; X64-NEXT: movabsq $72057594037927808, %rcx # imm = 0xFFFFFFFFFFFF80
; X64-NEXT: andq %rax, %rcx
; X64-NEXT: movl %ecx, (%rdi)
; X64-NEXT: shrq $32, %rcx
; X64-NEXT: movw %cx, 4(%rdi)
; X64-NEXT: retq
  %b = load i56, i56* %a, align 1
  %c = and i56 %b, -128
  %d = or i56 %c, 384
  store i56 %d, i56* %a, align 1
  ret void
}
; Insert a single bit (bit 13) of an i56 in memory from a zeroext i1 argument:
; mask with -8193 (0xFFFFFFFFFFDFFF in 56 bits), OR in the bit shifted left 13.
; NOTE(review): this definition appears truncated here (trailing `ret void`/`}`
; are below the visible range) and lines carry residual numbering — confirm
; against the original test file before relying on it.
172 define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
173 ; X86-LABEL: i56_insert_bit:
175 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
176 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
177 ; X86-NEXT: shll $13, %ecx
178 ; X86-NEXT: movl $-8193, %edx # imm = 0xDFFF
179 ; X86-NEXT: andl (%eax), %edx
180 ; X86-NEXT: orl %ecx, %edx
181 ; X86-NEXT: movl %edx, (%eax)
184 ; X64-LABEL: i56_insert_bit:
186 ; X64-NEXT: movl %esi, %eax
187 ; X64-NEXT: movzwl 4(%rdi), %ecx
188 ; X64-NEXT: movzbl 6(%rdi), %edx
189 ; X64-NEXT: movb %dl, 6(%rdi)
190 ; X64-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx
191 ; X64-NEXT: shll $16, %edx
192 ; X64-NEXT: orl %ecx, %edx
193 ; X64-NEXT: shlq $32, %rdx
194 ; X64-NEXT: movl (%rdi), %ecx
195 ; X64-NEXT: orq %rdx, %rcx
196 ; X64-NEXT: shlq $13, %rax
197 ; X64-NEXT: movabsq $72057594037919743, %rdx # imm = 0xFFFFFFFFFFDFFF
198 ; X64-NEXT: andq %rcx, %rdx
199 ; X64-NEXT: orq %rax, %rdx
200 ; X64-NEXT: movl %edx, (%rdi)
201 ; X64-NEXT: shrq $32, %rdx
202 ; X64-NEXT: movw %dx, 4(%rdi)
204 %extbit = zext i1 %bit to i56
205 %b = load i56, i56* %a, align 1
206 %extbit.shl = shl nuw nsw i56 %extbit, 13
207 %c = and i56 %b, -8193
208 %d = or i56 %c, %extbit.shl
209 store i56 %d, i56* %a, align 1