staging: rtl8192u: remove redundant assignment to pointer crypt
[linux/fpc-iii.git] / arch / s390 / include / asm / vx-insn.h
blob0c05a673811cee6a1d81096c8a58e4e479b16163
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Support for Vector Instructions
5 * Assembler macros to generate .byte/.word code for particular
6 * vector instructions that are supported by recent binutils (>= 2.26) only.
8 * Copyright IBM Corp. 2015
9 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
12 #ifndef __ASM_S390_VX_INSN_H
13 #define __ASM_S390_VX_INSN_H
15 #ifdef __ASSEMBLY__
18 /* Macros to generate vector instruction byte code */
/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand (absolute symbol) that receives the register number
 * @gr:		String designation of the register in the format "%rN"
 *
 * If @gr matches none of the "%r0".."%r15" spellings, it is used as a
 * plain numeric expression instead (fallthrough at the bottom).
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.irp	nr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
	.ifc	\gr,%r\nr
	\opd = \nr
	.endif
	.endr
	.if \opd == 255
	\opd = \gr
	.endif
.endm
/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand (absolute symbol) that receives the register number
 * @vxr:	String designation of the register in the format "%vN"
 *
 * The vector register number is used as input number to the
 * instruction and, as well, to compute the RXB field of the
 * instruction.  If @vxr matches none of the "%v0".."%v31" spellings,
 * it is used as a plain numeric expression instead.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.irp	nr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	.ifc	\vxr,%v\nr
	\opd = \nr
	.endif
	.endr
	.if \opd == 255
	\opd = \vxr
	.endif
.endm
/* RXB - Compute most significant bit used vector registers
 *
 * @rxb:	Operand (absolute symbol) that receives the computed RXB value
 * @v1:	First vector register designated operand
 * @v2:	Second vector register designated operand (default 0)
 * @v3:	Third vector register designated operand (default 0)
 * @v4:	Fourth vector register designated operand (default 0)
 *
 * Bit 0x10 of each register number (i.e. register >= 16) maps to one
 * RXB extension bit: v1 -> 0x08, v2 -> 0x04, v3 -> 0x02, v4 -> 0x01.
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = ((\v1) & 0x10) >> 1
	\rxb = \rxb | (((\v2) & 0x10) >> 2)
	\rxb = \rxb | (((\v3) & 0x10) >> 3)
	\rxb = \rxb | (((\v4) & 0x10) >> 4)
.endm
/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:	Element size control
 * @v1:	First vector register designated operand (for RXB)
 * @v2:	Second vector register designated operand (for RXB, default 0)
 * @v3:	Third vector register designated operand (for RXB, default 0)
 * @v4:	Fourth vector register designated operand (for RXB, default 0)
 *
 * Emits the byte holding the m-field (high nibble) and RXB (low nibble).
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	/* The former "rxb = 0" pre-initialization was redundant: RXB
	 * unconditionally assigns its first operand before OR-ing bits in. */
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm
/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:	Element size control
 * @opc:	Opcode (trailing byte of the 0xE7-prefixed instruction)
 * @v1:	First vector register designated operand (for RXB)
 * @v2:	Second vector register designated operand (for RXB, default 0)
 * @v3:	Third vector register designated operand (for RXB, default 0)
 * @v4:	Fourth vector register designated operand (for RXB, default 0)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4		/* m-field + RXB byte */
	.byte	\opc				/* final opcode byte */
.endm
/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK
 *
 * @vr:	Vector register designated operand (destination)
 * @imm2:	16-bit immediate mask value
 */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr				/* resolve "%vN" -> number */
	.word	(0xE700 | ((v1&15) << 4))	/* 0xE7, v1 low nibble in bits 8-11 */
	.word	\imm2				/* immediate operand */
	MRXBOPC	0, 0x44, v1			/* m=0, opcode 0x44 */
.endm
/* Clear a vector register: byte mask generated from immediate 0 */
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
/* Set all bits of a vector register: byte mask from immediate 0xFFFF */
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
/* VECTOR LOAD VR ELEMENT FROM GR
 *
 * @v:	Vector register designated operand (destination)
 * @gr:	General-purpose register holding the element value
 * @disp:	Displacement selecting the element index
 * @m:	Element size control
 */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"			/* base register fixed to %r0 (no base) */
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
/* Byte-element wrapper for VLVG (m=0).
 *
 * Fix: VLVG takes exactly four parameters (v, gr, disp, m); the original
 * body passed \base as a fifth argument, which fails to assemble on any
 * use of this macro.  @base is retained for interface compatibility but
 * is unused — VLVG hard-codes a zero base register internally.
 */
.macro	VLVGB	v, gr, index, base
	VLVG	\v, \gr, \index, 0
.endm
/* Element-size-specific wrappers for VLVG */
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1		/* m=1: halfword element */
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2		/* m=2: word element */
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3		/* m=3: doubleword element */
.endm
/* VECTOR LOAD REGISTER
 *
 * @v1:	Destination vector register designated operand
 * @v2:	Source vector register designated operand
 */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0				/* unused operand fields */
	MRXBOPC	0, 0x56, v1, v2
.endm
/* VECTOR LOAD
 *
 * @v:	Vector register designated operand (destination)
 * @disp:	Displacement
 * @index:	Index register (default "%r0", i.e. no index)
 * @base:	Base register
 */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1
.endm
/* VECTOR LOAD ELEMENT — common emitter for VLEB/VLEH/VLEF/VLEG
 *
 * @vr1:	Vector register designated operand (destination)
 * @disp:	Displacement
 * @index:	Index register (default "%r0", i.e. no index)
 * @base:	Base register
 * @m3:	Element index within the vector register
 * @opc:	Element-size-specific opcode byte
 */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
/* Element-size-specific front ends for VLEx (opcodes 0x00/0x01/0x03/0x02) */
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00	/* byte */
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01	/* halfword */
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03	/* word */
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02	/* doubleword */
.endm
/* VECTOR LOAD ELEMENT IMMEDIATE — common emitter for VLEIB/H/F/G
 *
 * @vr1:	Vector register designated operand (destination)
 * @imm2:	16-bit immediate value
 * @m3:	Element index within the vector register
 * @opc:	Element-size-specific opcode byte
 */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
/* Element-size-specific front ends for VLEIx.
 * NOTE(review): the third parameter is named "index" but is forwarded as
 * VLEIx's m3 element-index field — the naming is just inconsistent with
 * the VLEx wrappers above, behavior matches.
 */
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40	/* byte */
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41	/* halfword */
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43	/* word */
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42	/* doubleword */
.endm
/* VECTOR LOAD GR FROM VR ELEMENT
 *
 * @gr:	General-purpose register (destination)
 * @vr:	Vector register designated operand (source)
 * @disp:	Displacement selecting the element index
 * @base:	Base register (default "%r0", i.e. no base)
 * @m:	Element size control
 */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
/* Element-size-specific wrappers for VLGV */
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0	/* m=0: byte element */
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1	/* m=1: halfword element */
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2	/* m=2: word element */
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3	/* m=3: doubleword element */
.endm
/* VECTOR LOAD MULTIPLE
 *
 * @vfrom:	First vector register of the range (destination)
 * @vto:	Last vector register of the range (destination)
 * @disp:	Displacement
 * @base:	Base register
 * @hint:	m-field value (default 3)
 */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base			/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x36, v1, v3
.endm
/* VECTOR STORE MULTIPLE
 *
 * @vfrom:	First vector register of the range (source)
 * @vto:	Last vector register of the range (source)
 * @disp:	Displacement
 * @base:	Base register
 * @hint:	m-field value (default 3)
 */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base			/* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
/* VECTOR PERMUTE
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 * @vr4:	Permute-pattern vector register designated operand
 */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	/* v4's low nibble sits in the m-field slot of MRXBOPC */
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
/* VECTOR UNPACK LOGICAL LOW
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	Source vector register designated operand
 * @m3:	Element size control
 */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000				/* unused operand fields */
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
/* Element-size-specific wrappers */
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0			/* m3=0: byte elements */
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1			/* m3=1: halfword elements */
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2			/* m3=2: word elements */
.endm
/* Vector integer instructions */

/* VECTOR AND
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm
/* VECTOR EXCLUSIVE OR
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 * @m4:	Element size control
 */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
/* Element-size-specific wrappers */
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0		/* m4=0: byte elements */
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1		/* m4=1: halfword elements */
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2		/* m4=2: word elements */
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3		/* m4=3: doubleword elements */
.endm
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 * @vr4:	Accumulator vector register designated operand
 * @m5:	Element size control
 */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	/* v4's low nibble sits in the m-field slot of MRXBOPC */
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
/* Element-size-specific wrappers */
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0	/* m5=0: byte elements */
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1	/* m5=1: halfword elements */
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2	/* m5=2: word elements */
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3	/* m5=3: doubleword elements */
.endm
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	Source vector register designated operand
 * @vr3:	Vector register designated operand holding the shift count
 */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm
/* VECTOR REPLICATE IMMEDIATE
 *
 * @vr1:	Destination vector register designated operand
 * @imm2:	16-bit signed immediate replicated into each element
 * @m3:	Element size control
 */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
/* Element-size-specific wrappers for VREPI */
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0			/* m3=0: byte elements */
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1			/* m3=1: halfword elements */
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2			/* m3=2: word elements */
.endm
/* Doubleword wrapper for VREPI (m3=3).
 * Fix: the body invoked "VREP", which is not defined anywhere in this
 * header (all sibling wrappers call VREPI) — any use of VREPIG would
 * fail to assemble with an unknown-mnemonic/macro error.
 */
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm
/* VECTOR ADD
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	First source vector register designated operand
 * @vr3:	Second source vector register designated operand
 * @m4:	Element size control
 */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
/* Element-size-specific wrappers */
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0		/* m4=0: byte elements */
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1		/* m4=1: halfword elements */
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2		/* m4=2: word elements */
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3		/* m4=3: doubleword elements */
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4		/* m4=4: quadword */
.endm
/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
 *
 * @vr1:	Destination vector register designated operand
 * @vr2:	Source vector register designated operand
 * @vr3:	Vector register designated operand holding per-element shift counts
 * @m4:	Element size control
 */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x7A, v1, v2, v3
.endm

/* Element-size-specific wrappers */
.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0		/* m4=0: byte elements */
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1		/* m4=1: halfword elements */
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2		/* m4=2: word elements */
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3		/* m4=3: doubleword elements */
.endm
560 #endif /* __ASSEMBLY__ */
561 #endif /* __ASM_S390_VX_INSN_H */