/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported by recent binutils (>= 2.26) only.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__

/* Macros to generate vector instruction byte code */

#define REG_NUM_INVALID	255

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @gr:		String designation of the general-purpose register in the
 *		format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = REG_NUM_INVALID
	.ifc \gr,%r0
	\opd = 0
	.endif
	.ifc \gr,%r1
	\opd = 1
	.endif
	.ifc \gr,%r2
	\opd = 2
	.endif
	.ifc \gr,%r3
	\opd = 3
	.endif
	.ifc \gr,%r4
	\opd = 4
	.endif
	.ifc \gr,%r5
	\opd = 5
	.endif
	.ifc \gr,%r6
	\opd = 6
	.endif
	.ifc \gr,%r7
	\opd = 7
	.endif
	.ifc \gr,%r8
	\opd = 8
	.endif
	.ifc \gr,%r9
	\opd = 9
	.endif
	.ifc \gr,%r10
	\opd = 10
	.endif
	.ifc \gr,%r11
	\opd = 11
	.endif
	.ifc \gr,%r12
	\opd = 12
	.endif
	.ifc \gr,%r13
	\opd = 13
	.endif
	.ifc \gr,%r14
	\opd = 14
	.endif
	.ifc \gr,%r15
	\opd = 15
	.endif
	.if \opd == REG_NUM_INVALID
	.error "Invalid general-purpose register designation: \gr"
	.endif
.endm

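/*
 * Usage sketch (illustrative, values assumed): "GR_NUM b2, %r11" sets
 * the assembler symbol b2 to 11; any string other than %r0-%r15 stops
 * the build with the .error above.
 */
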
/* VX_R() - Extract the four low-order bits of the vector register
 * number for encoding into the instruction */
#define VX_R(v)	(v & 0x0F)

/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	String designation of the vector register in the format "%vN"
 *
 * The vector register number is used as input to the instruction and
 * to compute the RXB field of the instruction.  To encode the four
 * low-order bits of the vector register number into the instruction
 * opcode, use the VX_R(v) macro.
 */
.macro	VX_NUM	opd vxr
	\opd = REG_NUM_INVALID
	.ifc \vxr,%v0
	\opd = 0
	.endif
	.ifc \vxr,%v1
	\opd = 1
	.endif
	.ifc \vxr,%v2
	\opd = 2
	.endif
	.ifc \vxr,%v3
	\opd = 3
	.endif
	.ifc \vxr,%v4
	\opd = 4
	.endif
	.ifc \vxr,%v5
	\opd = 5
	.endif
	.ifc \vxr,%v6
	\opd = 6
	.endif
	.ifc \vxr,%v7
	\opd = 7
	.endif
	.ifc \vxr,%v8
	\opd = 8
	.endif
	.ifc \vxr,%v9
	\opd = 9
	.endif
	.ifc \vxr,%v10
	\opd = 10
	.endif
	.ifc \vxr,%v11
	\opd = 11
	.endif
	.ifc \vxr,%v12
	\opd = 12
	.endif
	.ifc \vxr,%v13
	\opd = 13
	.endif
	.ifc \vxr,%v14
	\opd = 14
	.endif
	.ifc \vxr,%v15
	\opd = 15
	.endif
	.ifc \vxr,%v16
	\opd = 16
	.endif
	.ifc \vxr,%v17
	\opd = 17
	.endif
	.ifc \vxr,%v18
	\opd = 18
	.endif
	.ifc \vxr,%v19
	\opd = 19
	.endif
	.ifc \vxr,%v20
	\opd = 20
	.endif
	.ifc \vxr,%v21
	\opd = 21
	.endif
	.ifc \vxr,%v22
	\opd = 22
	.endif
	.ifc \vxr,%v23
	\opd = 23
	.endif
	.ifc \vxr,%v24
	\opd = 24
	.endif
	.ifc \vxr,%v25
	\opd = 25
	.endif
	.ifc \vxr,%v26
	\opd = 26
	.endif
	.ifc \vxr,%v27
	\opd = 27
	.endif
	.ifc \vxr,%v28
	\opd = 28
	.endif
	.ifc \vxr,%v29
	\opd = 29
	.endif
	.ifc \vxr,%v30
	\opd = 30
	.endif
	.ifc \vxr,%v31
	\opd = 31
	.endif
	.if \opd == REG_NUM_INVALID
	.error "Invalid vector register designation: \vxr"
	.endif
.endm

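/*
 * Usage sketch (illustrative): "VX_NUM v1, %v17" sets v1 = 17.
 * VX_R(v1) then yields 1 for the 4-bit register field, while the
 * 0x10 bit is picked up by the RXB macro below so that %v16-%v31
 * remain addressable.
 */
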
/* RXB - Compute the RXB field from the most significant bit of each
 * vector register number
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
	\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
	\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
	\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
	\rxb = \rxb | 0x01
	.endif
.endm

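/*
 * Example (values assumed): for register numbers 16, 2, and 31,
 * "RXB rxb, 16, 2, 31" yields rxb = 0x08 | 0x02 = 0x0A; these bits
 * extend the 4-bit register fields to address %v16-%v31.
 */
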
/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:	Element size control
 * @v1:	First vector register designated operand (for RXB)
 * @v2:	Second vector register designated operand (for RXB)
 * @v3:	Third vector register designated operand (for RXB)
 * @v4:	Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm

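/*
 * Resulting layout of the 6-byte E7xx instructions built below: the
 * two .word directives in each macro fill bytes 0-3 (0xE7, register
 * fields, displacement or immediate); MRXBOPC then emits byte 4
 * (element size control in the high nibble, RXB in the low nibble)
 * and byte 5 (the final opcode byte).
 */
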
/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | (VX_R(v1) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm

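/*
 * Usage sketch (illustrative): "VZERO %v0" expands to VGBM %v0,0 and
 * emits the bytes 0xE7 0x00 0x00 0x00 0x00 0x44, clearing all 128
 * bits of %v0; VONE sets all bits via the 0xFFFF mask.
 */
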
/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | (VX_R(v1) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm

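/*
 * Usage sketch (illustrative): "VLVGG %v16, %r3, 1" copies %r3 into
 * doubleword element 1 of %v16; the element index comes from the
 * displacement field because the base register is hard-wired to %r0.
 */
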
/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | (VX_R(v1) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x06, v1
.endm

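/*
 * Usage sketch (illustrative): "VL %v16, 0, %r0, %r4" loads the 16
 * bytes at the address in %r4 into %v16; %r0 as index register means
 * "no index" by the usual s390 addressing convention.
 */
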
/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | (VX_R(v1) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | (VX_R(v1) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm

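/*
 * Usage sketch (illustrative): "VLEIG %v1, 1, 0" places the
 * sign-extended 16-bit immediate 1 into doubleword element 0 of %v1,
 * leaving element 1 unchanged.
 */
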
/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm

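/*
 * Usage sketch (illustrative): "VLGVF %r2, %v16, 3" is the inverse of
 * VLVG above and copies word element 3 of %v16 into %r2.
 */
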
/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x36, v1, v3
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	/* Base register */
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x3E, v1, v3
.endm

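/*
 * Usage sketch (illustrative; assumes %r1 points to a buffer of at
 * least 128 bytes):
 *
 *	VSTM	%v0, %v7, 0, %r1	# save %v0-%v7
 *	... code that clobbers %v0-%v7 ...
 *	VLM	%v0, %v7, 0, %r1	# restore them
 */
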
/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	VX_R(v4), 0x8C, v1, v2, v3, v4
.endm

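/*
 * Note: VPERM takes four vector operands, so the fourth register
 * number is passed through the element-size-control position of
 * MRXBOPC; each byte of the fourth operand selects one byte of the
 * 32-byte concatenation of the second and third operands.
 */
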
/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm


/* Vector integer instructions */

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm

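/*
 * Usage sketch (illustrative): "VX %v1, %v2, %v3" stores the bitwise
 * XOR of %v2 and %v3 in %v1; "VX %v1, %v1, %v1" zeroes %v1.
 */
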
/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12) | (\m5 << 8)
	MRXBOPC	VX_R(v4), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm

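/*
 * Note: VGFM/VGFMA perform carry-less (GF(2)) multiply-and-sum; the
 * doubleword forms VGFMG/VGFMAG are used, for example, by the
 * kernel's vectorized CRC-32 code.
 */
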
/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
	.word	(VX_R(v3) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm

#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_H */