/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__
/* Macros to generate vector instruction byte code */
/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @r64:	String designation register in the format "%rN"
 */
/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	String designation register in the format "%vN"
 *
 * The vector register number is used as input to the instruction and
 * to compute the RXB field of the instruction.
 */
/* RXB - Compute the RXB field from the most significant bits of the
 * designated vector registers
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
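	/*
	 * Body sketch (reconstructed, not verbatim from the original source):
	 * the architected RXB field holds the most significant bit of each
	 * vector register designation, with v1..v4 mapping to RXB bits 3..0.
	 */
	\rxb = 0
	.if \v1 & 0x10
		\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
		\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
		\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
		\rxb = \rxb | 0x01
	.endif
.endm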
/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:		Element size control
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm
/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
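/*
 * Illustration (not part of the original macros): each emitter below builds
 * a six-byte E7xx vector instruction from three pieces, for example
 *
 *	.word	0xE700 | ...	opcode byte 0xE7 plus the first register fields
 *	.word	...		remaining register/index/base/displacement or
 *				immediate fields (format dependent)
 *	MRXBOPC	m, opc, ...	the (m << 4) | RXB byte and the trailing opcode byte
 */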
/* Vector support instructions */
/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
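/*
 * Usage sketch (illustrative, not from the original header): load the
 * contents of general register %r3 into word element 2 of %v24. Register
 * and element choices are arbitrary examples.
 *
 *	VLVGF	%v24, %r3, 2
 */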
/* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm
/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1
.endm
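/*
 * Usage sketch (illustrative, not from the original header): load 16 bytes
 * from the address in %r2 into %v16, with no index register. Register
 * choices are arbitrary examples.
 *
 *	VL	%v16, 0, %r0, %r2
 */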
/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm
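/*
 * Usage sketch (illustrative, not from the original header): load the word
 * at the address in %r5 into word element 3 of %v0. Register and element
 * choices are arbitrary examples.
 *
 *	VLEF	%v0, 0, %r0, %r5, 3
 */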
/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm
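/*
 * Usage sketch (illustrative, not from the original header): set doubleword
 * element 0 of %v0 to the sign-extended immediate 1.
 *
 *	VLEIG	%v0, 1, 0
 */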
/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
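/*
 * Usage sketch (illustrative, not from the original header): extract
 * doubleword element 1 of %v16 into %r2, using the default %r0 base.
 *
 *	VLGVG	%r2, %v16, 1
 */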
/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x36, v1, v3
.endm
/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x3E, v1, v3
.endm
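/*
 * Usage sketch (illustrative, not from the original header): spill and
 * reload %v16-%v23 through a 128-byte buffer addressed by %r1.
 *
 *	VSTM	%v16, %v23, 0, %r1
 *	...
 *	VLM	%v16, %v23, 0, %r1
 */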
/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm
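/*
 * Usage sketch (illustrative, not from the original header): zero-extend the
 * two rightmost word elements of %v2 into the doubleword elements of %v1.
 *
 *	VUPLLF	%v1, %v2
 */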
/* Vector integer instructions */
/* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm
/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm
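/*
 * Usage sketch (illustrative, not from the original header): carry-less
 * multiply the doubleword elements of %v2 and %v3, XOR the two 128-bit
 * products with %v4, and place the result in %v1.
 *
 *	VGFMAG	%v1, %v2, %v3, %v4
 */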
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm
/* VECTOR REPLICATE IMMEDIATE */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2
.endm
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm
/* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4
.endm
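/*
 * Usage sketch (illustrative, not from the original header): add the
 * doubleword elements of %v2 and %v3 and place the sums in %v1.
 *
 *	VAG	%v1, %v2, %v3
 */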
/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x7A, v1, v2, v3
.endm
.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3
.endm
#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */