//===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the shift and rotate instructions.
//
//===----------------------------------------------------------------------===//

// FIXME: Someone needs to smear multipattern goodness all over this file.

let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHL8rCL  : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (shl GR8:$src1, CL))]>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize16;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (shl GR32:$src1, CL))]>, OpSize32;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

let isConvertibleToThreeAddress = 1 in {   // Can transform into LEA.
def SHL8ri  : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "shl{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;

def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "shl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;

def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "shl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;

def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "shl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
} // isConvertibleToThreeAddress = 1

// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
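// (The shift-by-one Pats live in X86InstrCompiler.td, e.g.
// (shl GR32:$src1, (i8 1)) is selected as (ADD32rr GR32:$src1, GR32:$src1).)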
let hasSideEffects = 0 in {
def SHL8r1  : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
                "shl{b}\t$dst", []>;
def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                "shl{w}\t$dst", []>, OpSize16;
def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                "shl{l}\t$dst", []>, OpSize32;
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                 "shl{q}\t$dst", []>;
} // hasSideEffects = 0
} // Constraints = "$src1 = $dst", SchedRW

// FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
// using CL?
let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHL8mCL  : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHL8mi  : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "shl{b}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "shl{w}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "shl{l}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

def SHL8m1  : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
                "shl{b}\t$dst",
                [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
                "shl{w}\t$dst",
                [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
                "shl{l}\t$dst",
                [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHR8rCL  : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (srl GR8:$src1, CL))]>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize16;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (srl GR32:$src1, CL))]>, OpSize32;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

def SHR8ri  : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$src2),
                  "shr{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "shr{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "shr{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;

def SHR8r1  : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
                "shr{b}\t$dst",
                [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                "shr{w}\t$dst",
                [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize16;
def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                "shr{l}\t$dst",
                [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>, OpSize32;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHR8mCL  : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHR8mi  : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "shr{b}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "shr{w}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "shr{l}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

def SHR8m1  : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
                "shr{b}\t$dst",
                [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
                "shr{w}\t$dst",
                [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
                "shr{l}\t$dst",
                [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SAR8rCL  : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (sra GR8:$src1, CL))]>;
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (sra GR16:$src1, CL))]>,
                 OpSize16;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (sra GR32:$src1, CL))]>,
                 OpSize32;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

def SAR8ri  : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "sar{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "sar{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "sar{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;

def SAR8r1  : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                "sar{b}\t$dst",
                [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                "sar{w}\t$dst",
                [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize16;
def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                "sar{l}\t$dst",
                [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>, OpSize32;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SAR8mCL  : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SAR8mi  : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "sar{b}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "sar{w}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "sar{l}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

def SAR8m1  : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
                "sar{b}\t$dst",
                [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
                "sar{w}\t$dst",
                [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
                "sar{l}\t$dst",
                [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Rotate instructions
//===----------------------------------------------------------------------===//
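
// The rotate-through-carry forms (RCL/RCR) below carry no selection patterns;
// they are defined primarily for the assembler and disassembler.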
let hasSideEffects = 0 in {
let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCL8rCL  : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
                 "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCL8r1  : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
                "rcl{b}\t$dst", []>;
def RCL8ri  : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                  "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcl{q}\t$dst", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCR8rCL  : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
                 "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCR8r1  : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
                "rcr{b}\t$dst", []>;
def RCR8ri  : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                  "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcr{q}\t$dst", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst"

let SchedRW = [WriteRotateLd, WriteRMW], mayStore = 1 in {
let Uses = [EFLAGS] in {
def RCL8m1  : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
                "rcl{b}\t$dst", []>;
def RCL8mi  : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                  "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;

def RCR8m1  : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
                "rcr{b}\t$dst", []>;
def RCR8mi  : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                  "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def RCL8mCL  : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
                 "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;

def RCR8mCL  : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
                 "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;
} // Uses = [CL, EFLAGS]
} // SchedRW
} // hasSideEffects = 0

let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROL8rCL  : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize16;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotl GR32:$src1, CL))]>, OpSize32;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

def ROL8ri  : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "rol{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (rotl GR8:$src1, (i8 relocImm:$src2)))]>;
def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "rol{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (rotl GR16:$src1, (i8 relocImm:$src2)))]>,
                  OpSize16;
def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "rol{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (rotl GR32:$src1, (i8 relocImm:$src2)))]>,
                  OpSize32;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "rol{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotl GR64:$src1, (i8 relocImm:$src2)))]>;

def ROL8r1  : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                "rol{b}\t$dst",
                [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                "rol{w}\t$dst",
                [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize16;
def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                "rol{l}\t$dst",
                [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>, OpSize32;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROL8mCL  : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROL8mi  : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, u8imm:$src1),
                  "rol{b}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, u8imm:$src1),
                  "rol{w}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                  OpSize16;
def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, u8imm:$src1),
                  "rol{l}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                  OpSize32;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, u8imm:$src1),
                   "rol{q}\t{$src1, $dst|$dst, $src1}",
                   [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

def ROL8m1  : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
                "rol{b}\t$dst",
                [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
                "rol{w}\t$dst",
                [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
                "rol{l}\t$dst",
                [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROR8rCL  : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize16;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotr GR32:$src1, CL))]>, OpSize32;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

def ROR8ri  : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "ror{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (rotr GR8:$src1, (i8 relocImm:$src2)))]>;
def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "ror{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (rotr GR16:$src1, (i8 relocImm:$src2)))]>,
                  OpSize16;
def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "ror{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (rotr GR32:$src1, (i8 relocImm:$src2)))]>,
                  OpSize32;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "ror{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotr GR64:$src1, (i8 relocImm:$src2)))]>;

def ROR8r1  : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                "ror{b}\t$dst",
                [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                "ror{w}\t$dst",
                [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize16;
def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                "ror{l}\t$dst",
                [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>, OpSize32;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src1 = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROR8mCL  : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROR8mi  : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "ror{b}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "ror{w}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "ror{l}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "ror{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

def ROR8m1  : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
                "ror{b}\t$dst",
                [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
                "ror{w}\t$dst",
                [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
                "ror{l}\t$dst",
                [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Double shift instructions (generalizations of rotate)
//===----------------------------------------------------------------------===//
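
// SHLD shifts $dst left by the count and fills the vacated low-order bits
// from the most-significant bits of $src2; SHRD is the mirror image, filling
// the vacated high-order bits from the low-order bits of $src2.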
let Constraints = "$src1 = $dst" in {

let Uses = [CL], SchedRW = [WriteSHDrrcl] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
                    TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
                    TB;
} // SchedRW

let isCommutable = 1, SchedRW = [WriteSHDrri] in {  // These instructions commute to each other.
def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
                                       (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
                                       (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
                                       (i8 imm:$src3)))]>,
                     TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
                                       (i8 imm:$src3)))]>,
                     TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                      TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                      TB;
} // isCommutable, SchedRW
} // Constraints = "$src1 = $dst"

let Uses = [CL], SchedRW = [WriteSHDmrcl] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
                           addr:$dst)]>, TB, OpSize16;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
                           addr:$dst)]>, TB, OpSize16;

def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
                           addr:$dst)]>, TB, OpSize32;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
                           addr:$dst)]>, TB, OpSize32;

def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
}

let SchedRW = [WriteSHDmri] in {
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;

def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                      TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                      TB;
} // SchedRW

} // Defs = [EFLAGS]

// Use the opposite rotate if it allows us to use the rotate-by-1 instruction.
def : Pat<(rotl GR8:$src1,  (i8 7)),  (ROR8r1  GR8:$src1)>;
def : Pat<(rotl GR16:$src1, (i8 15)), (ROR16r1 GR16:$src1)>;
def : Pat<(rotl GR32:$src1, (i8 31)), (ROR32r1 GR32:$src1)>;
def : Pat<(rotl GR64:$src1, (i8 63)), (ROR64r1 GR64:$src1)>;
def : Pat<(rotr GR8:$src1,  (i8 7)),  (ROL8r1  GR8:$src1)>;
def : Pat<(rotr GR16:$src1, (i8 15)), (ROL16r1 GR16:$src1)>;
def : Pat<(rotr GR32:$src1, (i8 31)), (ROL32r1 GR32:$src1)>;
def : Pat<(rotr GR64:$src1, (i8 63)), (ROL64r1 GR64:$src1)>;
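// A rotate left by (bitwidth - 1) is identical to a rotate right by 1 (and
// vice versa), and the by-1 forms encode without an immediate byte.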

def : Pat<(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROR8m1 addr:$dst)>;
def : Pat<(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROR16m1 addr:$dst)>;
def : Pat<(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROR32m1 addr:$dst)>;
def : Pat<(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROR64m1 addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(store (rotr (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROL8m1 addr:$dst)>;
def : Pat<(store (rotr (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROL16m1 addr:$dst)>;
def : Pat<(store (rotr (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROL32m1 addr:$dst)>;
def : Pat<(store (rotr (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROL64m1 addr:$dst)>, Requires<[In64BitMode]>;

// Sandy Bridge and newer Intel processors support faster rotates using
// SHLD to avoid a partial flag update on the normal rotate instructions.
// Use a pseudo so that TwoAddressInstructionPass and register allocation will
// see this as a unary instruction.
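// The pseudo is expanded after register allocation into the corresponding
// SHLD/SHRD immediate form with both register inputs tied to the destination
// (rotl(x, s) == shld(x, x, s)), e.g. SHLDROT32ri becomes SHLD32rri8.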
let Predicates = [HasFastSHLDRotate], AddedComplexity = 5,
    Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteSHDrri],
    Constraints = "$src1 = $dst" in {
def SHLDROT32ri : I<0, Pseudo, (outs GR32:$dst),
                    (ins GR32:$src1, u8imm:$shamt), "",
                    [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$shamt)))]>;
def SHLDROT64ri : I<0, Pseudo, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$shamt), "",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$shamt)))]>;

def SHRDROT32ri : I<0, Pseudo, (outs GR32:$dst),
                    (ins GR32:$src1, u8imm:$shamt), "",
                    [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$shamt)))]>;
def SHRDROT64ri : I<0, Pseudo, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$shamt), "",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$shamt)))]>;
}

def ROT32L2R_imm8 : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
  return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
}]>;

def ROT64L2R_imm8 : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
  return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
}]>;
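
// For example, a 32-bit rotl by 5 becomes a rotr by 32 - 5 = 27, and a
// 64-bit rotl by 5 becomes a rotr by 59.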

// NOTE: We use WriteShift for these rotates as they avoid the stalls
// of many of the older x86 rotate instructions.
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShift]>;

  def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
               (ins x86memop:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShiftLd]>;
}
}
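
// Each defm of bmi_rotate below produces an immediate rotate pair, e.g.
// RORX32ri and RORX32mi.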

multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift]>;

  def rm : I<0xF7, MRMSrcMem4VOp3,
             (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift.Folded,
                         // x86memop:$src1
                         ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                         ReadDefault,
                         // RC:$src2
                         WriteShift.ReadAfterFold]>;
}
}
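
// Each defm of bmi_shift below produces a register/register and a
// register/memory variant, e.g. SARX32rr and SARX32rm.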

let Predicates = [HasBMI2] in {
  defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
  defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, VEX_W;
  defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
  defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
  defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
  defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
  defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
  defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, VEX_W;

  // Prefer RORX which is non-destructive and doesn't update EFLAGS.
  let AddedComplexity = 10 in {
    def : Pat<(rotr GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, imm:$shamt)>;
    def : Pat<(rotr GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, imm:$shamt)>;

    def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
    def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
  }

  def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, imm:$shamt)>;
  def : Pat<(rotr (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, imm:$shamt)>;

  def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
  def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;
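
  // For example, (rotl GR32:$src, (i8 5)) is selected as
  // (RORX32ri GR32:$src, 27).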

  // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL when the shift amount is variable,
  // but not when it is an immediate. With a variable count, e.g.
  //
  //   shlx %sil, %edi, %esi
  //
  // the count can live in any register and EFLAGS is left untouched, instead
  // of having to be moved into %cl for a legacy shift.
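  //
  // In the patterns below, the i8 shift amount is widened by inserting it into
  // the low byte of an otherwise undefined 32/64-bit register (INSERT_SUBREG
  // of an IMPLICIT_DEF); SARX/SHRX/SHLX only use the low bits of the count.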
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, GR8:$src2),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, GR8:$src2),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, GR8:$src2),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, GR8:$src2),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, GR8:$src2),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, GR8:$src2),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  // When the operand being shifted comes from memory, the load is folded into
  // the BMI2 form where profitable, e.g.
  //
  //   shlx %al, (%ecx), %esi
  //
  // This priority is enforced by IsProfitableToFoldLoad.
  def : Pat<(sra (loadi32 addr:$src1), GR8:$src2),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), GR8:$src2),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), GR8:$src2),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), GR8:$src2),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), GR8:$src2),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), GR8:$src2),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;