/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2016 Romain Dolbeau. All rights reserved.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/isa_defs.h>

#if defined(__x86_64) && defined(HAVE_AVX512F)

#include <sys/types.h>
#include <sys/simd.h>
#include <sys/debug.h>

#define __asm __asm__ __volatile__

#define _REG_CNT(_0, _1, _2, _3, _4, _5, _6, _7, N, ...) N
#define REG_CNT(r...) _REG_CNT(r, 8, 7, 6, 5, 4, 3, 2, 1)
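
/*
 * REG_CNT counts its arguments: each register number in r shifts the
 * literal list 8..1 one slot to the right, so the value landing in the
 * N position is the argument count. E.g. REG_CNT(0, 1, 2, 3) expands to
 * _REG_CNT(0, 1, 2, 3, 8, 7, 6, 5, 4, 3, 2, 1), selecting N = 4.
 */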

#define VR0_(REG, ...) "zmm"#REG
#define VR1_(_1, REG, ...) "zmm"#REG
#define VR2_(_1, _2, REG, ...) "zmm"#REG
#define VR3_(_1, _2, _3, REG, ...) "zmm"#REG
#define VR4_(_1, _2, _3, _4, REG, ...) "zmm"#REG
#define VR5_(_1, _2, _3, _4, _5, REG, ...) "zmm"#REG
#define VR6_(_1, _2, _3, _4, _5, _6, REG, ...) "zmm"#REG
#define VR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) "zmm"#REG

#define VR0(r...) VR0_(r)
#define VR1(r...) VR1_(r)
#define VR2(r...) VR2_(r, 1)
#define VR3(r...) VR3_(r, 1, 2)
#define VR4(r...) VR4_(r, 1, 2)
#define VR5(r...) VR5_(r, 1, 2, 3)
#define VR6(r...) VR6_(r, 1, 2, 3, 4)
#define VR7(r...) VR7_(r, 1, 2, 3, 4, 5)

#define VRy0_(REG, ...) "ymm"#REG
#define VRy1_(_1, REG, ...) "ymm"#REG
#define VRy2_(_1, _2, REG, ...) "ymm"#REG
#define VRy3_(_1, _2, _3, REG, ...) "ymm"#REG
#define VRy4_(_1, _2, _3, _4, REG, ...) "ymm"#REG
#define VRy5_(_1, _2, _3, _4, _5, REG, ...) "ymm"#REG
#define VRy6_(_1, _2, _3, _4, _5, _6, REG, ...) "ymm"#REG
#define VRy7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) "ymm"#REG

#define VRy0(r...) VRy0_(r)
#define VRy1(r...) VRy1_(r)
#define VRy2(r...) VRy2_(r, 1)
#define VRy3(r...) VRy3_(r, 1, 2)
#define VRy4(r...) VRy4_(r, 1, 2)
#define VRy5(r...) VRy5_(r, 1, 2, 3)
#define VRy6(r...) VRy6_(r, 1, 2, 3, 4)
#define VRy7(r...) VRy7_(r, 1, 2, 3, 4, 5)

#define R_01(REG1, REG2, ...) REG1, REG2
#define _R_23(_0, _1, REG2, REG3, ...) REG2, REG3
#define R_23(REG...) _R_23(REG, 1, 2, 3)
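
/*
 * R_01/R_23 pick the first and second pair out of a four-register list,
 * so the pairwise routines below (e.g. the MUL path) can process four
 * registers two at a time.
 */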

#define ELEM_SIZE 64

typedef struct v {
	uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE)));
} v_t;
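
/*
 * Each v_t element covers one full 64-byte ZMM register, matching the
 * 0x40 byte offsets used by LOAD/STORE below.
 */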

#define XOR_ACC(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vpxorq 0x00(%[SRC]), %%" VR0(r)", %%" VR0(r) "\n" \
		    "vpxorq 0x40(%[SRC]), %%" VR1(r)", %%" VR1(r) "\n" \
		    "vpxorq 0x80(%[SRC]), %%" VR2(r)", %%" VR2(r) "\n" \
		    "vpxorq 0xc0(%[SRC]), %%" VR3(r)", %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	} \
}

#define XOR(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR4(r)", %" VR4(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR5(r)", %" VR5(r) "\n" \
		    "vpxorq %" VR2(r) ", %" VR6(r)", %" VR6(r) "\n" \
		    "vpxorq %" VR3(r) ", %" VR7(r)", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR2(r)", %" VR2(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR3(r)", %" VR3(r)); \
		break; \
	} \
}

#define ZERO(r...) XOR(r, r)

#define COPY(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR4(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR5(r) "\n" \
		    "vmovdqa64 %" VR2(r) ", %" VR6(r) "\n" \
		    "vmovdqa64 %" VR3(r) ", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR2(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR3(r)); \
		break; \
	} \
}

#define LOAD(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 0x00(%[SRC]), %%" VR0(r) "\n" \
		    "vmovdqa64 0x40(%[SRC]), %%" VR1(r) "\n" \
		    "vmovdqa64 0x80(%[SRC]), %%" VR2(r) "\n" \
		    "vmovdqa64 0xc0(%[SRC]), %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	} \
}

#define STORE(dst, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 %%" VR0(r) ", 0x00(%[DST])\n" \
		    "vmovdqa64 %%" VR1(r) ", 0x40(%[DST])\n" \
		    "vmovdqa64 %%" VR2(r) ", 0x80(%[DST])\n" \
		    "vmovdqa64 %%" VR3(r) ", 0xc0(%[DST])\n" \
		    : : [DST] "r" (dst)); \
		break; \
	} \
}

#define MUL2_SETUP() \
{ \
	__asm("vmovq %0, %%xmm31" :: "r"(0x1d1d1d1d1d1d1d1d)); \
	__asm("vpbroadcastq %xmm31, %zmm31"); \
	__asm("vmovq %0, %%xmm30" :: "r"(0x8080808080808080)); \
	__asm("vpbroadcastq %xmm30, %zmm30"); \
	__asm("vmovq %0, %%xmm29" :: "r"(0xfefefefefefefefe)); \
	__asm("vpbroadcastq %xmm29, %zmm29"); \
}

#define _MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 2: \
		__asm( \
		    "vpandq %" VR0(r)", %zmm30, %zmm26\n" \
		    "vpandq %" VR1(r)", %zmm30, %zmm25\n" \
		    "vpsrlq $7, %zmm26, %zmm28\n" \
		    "vpsrlq $7, %zmm25, %zmm27\n" \
		    "vpsllq $1, %zmm26, %zmm26\n" \
		    "vpsllq $1, %zmm25, %zmm25\n" \
		    "vpsubq %zmm28, %zmm26, %zmm26\n" \
		    "vpsubq %zmm27, %zmm25, %zmm25\n" \
		    "vpsllq $1, %" VR0(r)", %" VR0(r) "\n" \
		    "vpsllq $1, %" VR1(r)", %" VR1(r) "\n" \
		    "vpandq %zmm26, %zmm31, %zmm26\n" \
		    "vpandq %zmm25, %zmm31, %zmm25\n" \
		    "vpternlogd $0x6c,%zmm29, %zmm26, %" VR0(r) "\n" \
		    "vpternlogd $0x6c,%zmm29, %zmm25, %" VR1(r)); \
		break; \
	} \
}

#define MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		_MUL2(R_01(r)); \
		_MUL2(R_23(r)); \
		break; \
	case 2: \
		_MUL2(r); \
		break; \
	} \
}

/* General multiplication by adding powers of two */

#define _mul_x2_in 21, 22
#define _mul_x2_acc 23, 24

#define _MUL_PARAM(x, in, acc) \
{ \
	if (x & 0x01) { COPY(in, acc); } else { ZERO(acc); } \
	if (x & 0xfe) { MUL2(in); } \
	if (x & 0x02) { XOR(in, acc); } \
	if (x & 0xfc) { MUL2(in); } \
	if (x & 0x04) { XOR(in, acc); } \
	if (x & 0xf8) { MUL2(in); } \
	if (x & 0x08) { XOR(in, acc); } \
	if (x & 0xf0) { MUL2(in); } \
	if (x & 0x10) { XOR(in, acc); } \
	if (x & 0xe0) { MUL2(in); } \
	if (x & 0x20) { XOR(in, acc); } \
	if (x & 0xc0) { MUL2(in); } \
	if (x & 0x40) { XOR(in, acc); } \
	if (x & 0x80) { MUL2(in); XOR(in, acc); } \
}

#define MUL_x2_DEFINE(x) \
static void \
mul_x2_ ## x(void) { _MUL_PARAM(x, _mul_x2_in, _mul_x2_acc); }

MUL_x2_DEFINE(0); MUL_x2_DEFINE(1); MUL_x2_DEFINE(2); MUL_x2_DEFINE(3);
MUL_x2_DEFINE(4); MUL_x2_DEFINE(5); MUL_x2_DEFINE(6); MUL_x2_DEFINE(7);
MUL_x2_DEFINE(8); MUL_x2_DEFINE(9); MUL_x2_DEFINE(10); MUL_x2_DEFINE(11);
MUL_x2_DEFINE(12); MUL_x2_DEFINE(13); MUL_x2_DEFINE(14); MUL_x2_DEFINE(15);
MUL_x2_DEFINE(16); MUL_x2_DEFINE(17); MUL_x2_DEFINE(18); MUL_x2_DEFINE(19);
MUL_x2_DEFINE(20); MUL_x2_DEFINE(21); MUL_x2_DEFINE(22); MUL_x2_DEFINE(23);
MUL_x2_DEFINE(24); MUL_x2_DEFINE(25); MUL_x2_DEFINE(26); MUL_x2_DEFINE(27);
MUL_x2_DEFINE(28); MUL_x2_DEFINE(29); MUL_x2_DEFINE(30); MUL_x2_DEFINE(31);
MUL_x2_DEFINE(32); MUL_x2_DEFINE(33); MUL_x2_DEFINE(34); MUL_x2_DEFINE(35);
MUL_x2_DEFINE(36); MUL_x2_DEFINE(37); MUL_x2_DEFINE(38); MUL_x2_DEFINE(39);
MUL_x2_DEFINE(40); MUL_x2_DEFINE(41); MUL_x2_DEFINE(42); MUL_x2_DEFINE(43);
MUL_x2_DEFINE(44); MUL_x2_DEFINE(45); MUL_x2_DEFINE(46); MUL_x2_DEFINE(47);
MUL_x2_DEFINE(48); MUL_x2_DEFINE(49); MUL_x2_DEFINE(50); MUL_x2_DEFINE(51);
MUL_x2_DEFINE(52); MUL_x2_DEFINE(53); MUL_x2_DEFINE(54); MUL_x2_DEFINE(55);
MUL_x2_DEFINE(56); MUL_x2_DEFINE(57); MUL_x2_DEFINE(58); MUL_x2_DEFINE(59);
MUL_x2_DEFINE(60); MUL_x2_DEFINE(61); MUL_x2_DEFINE(62); MUL_x2_DEFINE(63);
MUL_x2_DEFINE(64); MUL_x2_DEFINE(65); MUL_x2_DEFINE(66); MUL_x2_DEFINE(67);
MUL_x2_DEFINE(68); MUL_x2_DEFINE(69); MUL_x2_DEFINE(70); MUL_x2_DEFINE(71);
MUL_x2_DEFINE(72); MUL_x2_DEFINE(73); MUL_x2_DEFINE(74); MUL_x2_DEFINE(75);
MUL_x2_DEFINE(76); MUL_x2_DEFINE(77); MUL_x2_DEFINE(78); MUL_x2_DEFINE(79);
MUL_x2_DEFINE(80); MUL_x2_DEFINE(81); MUL_x2_DEFINE(82); MUL_x2_DEFINE(83);
MUL_x2_DEFINE(84); MUL_x2_DEFINE(85); MUL_x2_DEFINE(86); MUL_x2_DEFINE(87);
MUL_x2_DEFINE(88); MUL_x2_DEFINE(89); MUL_x2_DEFINE(90); MUL_x2_DEFINE(91);
MUL_x2_DEFINE(92); MUL_x2_DEFINE(93); MUL_x2_DEFINE(94); MUL_x2_DEFINE(95);
MUL_x2_DEFINE(96); MUL_x2_DEFINE(97); MUL_x2_DEFINE(98); MUL_x2_DEFINE(99);
MUL_x2_DEFINE(100); MUL_x2_DEFINE(101); MUL_x2_DEFINE(102); MUL_x2_DEFINE(103);
MUL_x2_DEFINE(104); MUL_x2_DEFINE(105); MUL_x2_DEFINE(106); MUL_x2_DEFINE(107);
MUL_x2_DEFINE(108); MUL_x2_DEFINE(109); MUL_x2_DEFINE(110); MUL_x2_DEFINE(111);
MUL_x2_DEFINE(112); MUL_x2_DEFINE(113); MUL_x2_DEFINE(114); MUL_x2_DEFINE(115);
MUL_x2_DEFINE(116); MUL_x2_DEFINE(117); MUL_x2_DEFINE(118); MUL_x2_DEFINE(119);
MUL_x2_DEFINE(120); MUL_x2_DEFINE(121); MUL_x2_DEFINE(122); MUL_x2_DEFINE(123);
MUL_x2_DEFINE(124); MUL_x2_DEFINE(125); MUL_x2_DEFINE(126); MUL_x2_DEFINE(127);
MUL_x2_DEFINE(128); MUL_x2_DEFINE(129); MUL_x2_DEFINE(130); MUL_x2_DEFINE(131);
MUL_x2_DEFINE(132); MUL_x2_DEFINE(133); MUL_x2_DEFINE(134); MUL_x2_DEFINE(135);
MUL_x2_DEFINE(136); MUL_x2_DEFINE(137); MUL_x2_DEFINE(138); MUL_x2_DEFINE(139);
MUL_x2_DEFINE(140); MUL_x2_DEFINE(141); MUL_x2_DEFINE(142); MUL_x2_DEFINE(143);
MUL_x2_DEFINE(144); MUL_x2_DEFINE(145); MUL_x2_DEFINE(146); MUL_x2_DEFINE(147);
MUL_x2_DEFINE(148); MUL_x2_DEFINE(149); MUL_x2_DEFINE(150); MUL_x2_DEFINE(151);
MUL_x2_DEFINE(152); MUL_x2_DEFINE(153); MUL_x2_DEFINE(154); MUL_x2_DEFINE(155);
MUL_x2_DEFINE(156); MUL_x2_DEFINE(157); MUL_x2_DEFINE(158); MUL_x2_DEFINE(159);
MUL_x2_DEFINE(160); MUL_x2_DEFINE(161); MUL_x2_DEFINE(162); MUL_x2_DEFINE(163);
MUL_x2_DEFINE(164); MUL_x2_DEFINE(165); MUL_x2_DEFINE(166); MUL_x2_DEFINE(167);
MUL_x2_DEFINE(168); MUL_x2_DEFINE(169); MUL_x2_DEFINE(170); MUL_x2_DEFINE(171);
MUL_x2_DEFINE(172); MUL_x2_DEFINE(173); MUL_x2_DEFINE(174); MUL_x2_DEFINE(175);
MUL_x2_DEFINE(176); MUL_x2_DEFINE(177); MUL_x2_DEFINE(178); MUL_x2_DEFINE(179);
MUL_x2_DEFINE(180); MUL_x2_DEFINE(181); MUL_x2_DEFINE(182); MUL_x2_DEFINE(183);
MUL_x2_DEFINE(184); MUL_x2_DEFINE(185); MUL_x2_DEFINE(186); MUL_x2_DEFINE(187);
MUL_x2_DEFINE(188); MUL_x2_DEFINE(189); MUL_x2_DEFINE(190); MUL_x2_DEFINE(191);
MUL_x2_DEFINE(192); MUL_x2_DEFINE(193); MUL_x2_DEFINE(194); MUL_x2_DEFINE(195);
MUL_x2_DEFINE(196); MUL_x2_DEFINE(197); MUL_x2_DEFINE(198); MUL_x2_DEFINE(199);
MUL_x2_DEFINE(200); MUL_x2_DEFINE(201); MUL_x2_DEFINE(202); MUL_x2_DEFINE(203);
MUL_x2_DEFINE(204); MUL_x2_DEFINE(205); MUL_x2_DEFINE(206); MUL_x2_DEFINE(207);
MUL_x2_DEFINE(208); MUL_x2_DEFINE(209); MUL_x2_DEFINE(210); MUL_x2_DEFINE(211);
MUL_x2_DEFINE(212); MUL_x2_DEFINE(213); MUL_x2_DEFINE(214); MUL_x2_DEFINE(215);
MUL_x2_DEFINE(216); MUL_x2_DEFINE(217); MUL_x2_DEFINE(218); MUL_x2_DEFINE(219);
MUL_x2_DEFINE(220); MUL_x2_DEFINE(221); MUL_x2_DEFINE(222); MUL_x2_DEFINE(223);
MUL_x2_DEFINE(224); MUL_x2_DEFINE(225); MUL_x2_DEFINE(226); MUL_x2_DEFINE(227);
MUL_x2_DEFINE(228); MUL_x2_DEFINE(229); MUL_x2_DEFINE(230); MUL_x2_DEFINE(231);
MUL_x2_DEFINE(232); MUL_x2_DEFINE(233); MUL_x2_DEFINE(234); MUL_x2_DEFINE(235);
MUL_x2_DEFINE(236); MUL_x2_DEFINE(237); MUL_x2_DEFINE(238); MUL_x2_DEFINE(239);
MUL_x2_DEFINE(240); MUL_x2_DEFINE(241); MUL_x2_DEFINE(242); MUL_x2_DEFINE(243);
MUL_x2_DEFINE(244); MUL_x2_DEFINE(245); MUL_x2_DEFINE(246); MUL_x2_DEFINE(247);
MUL_x2_DEFINE(248); MUL_x2_DEFINE(249); MUL_x2_DEFINE(250); MUL_x2_DEFINE(251);
MUL_x2_DEFINE(252); MUL_x2_DEFINE(253); MUL_x2_DEFINE(254); MUL_x2_DEFINE(255);
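
/*
 * Because x is a literal at each MUL_x2_DEFINE() expansion, the compiler
 * folds every if (x & ...) test in _MUL_PARAM, so each mul_x2_N above
 * compiles to a straight-line multiply of the fixed registers by N.
 */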

typedef void (*mul_fn_ptr_t)(void);

static const mul_fn_ptr_t __attribute__((aligned(256)))
gf_x2_mul_fns[256] = {
	mul_x2_0, mul_x2_1, mul_x2_2, mul_x2_3, mul_x2_4, mul_x2_5,
	mul_x2_6, mul_x2_7, mul_x2_8, mul_x2_9, mul_x2_10, mul_x2_11,
	mul_x2_12, mul_x2_13, mul_x2_14, mul_x2_15, mul_x2_16, mul_x2_17,
	mul_x2_18, mul_x2_19, mul_x2_20, mul_x2_21, mul_x2_22, mul_x2_23,
	mul_x2_24, mul_x2_25, mul_x2_26, mul_x2_27, mul_x2_28, mul_x2_29,
	mul_x2_30, mul_x2_31, mul_x2_32, mul_x2_33, mul_x2_34, mul_x2_35,
	mul_x2_36, mul_x2_37, mul_x2_38, mul_x2_39, mul_x2_40, mul_x2_41,
	mul_x2_42, mul_x2_43, mul_x2_44, mul_x2_45, mul_x2_46, mul_x2_47,
	mul_x2_48, mul_x2_49, mul_x2_50, mul_x2_51, mul_x2_52, mul_x2_53,
	mul_x2_54, mul_x2_55, mul_x2_56, mul_x2_57, mul_x2_58, mul_x2_59,
	mul_x2_60, mul_x2_61, mul_x2_62, mul_x2_63, mul_x2_64, mul_x2_65,
	mul_x2_66, mul_x2_67, mul_x2_68, mul_x2_69, mul_x2_70, mul_x2_71,
	mul_x2_72, mul_x2_73, mul_x2_74, mul_x2_75, mul_x2_76, mul_x2_77,
	mul_x2_78, mul_x2_79, mul_x2_80, mul_x2_81, mul_x2_82, mul_x2_83,
	mul_x2_84, mul_x2_85, mul_x2_86, mul_x2_87, mul_x2_88, mul_x2_89,
	mul_x2_90, mul_x2_91, mul_x2_92, mul_x2_93, mul_x2_94, mul_x2_95,
	mul_x2_96, mul_x2_97, mul_x2_98, mul_x2_99, mul_x2_100, mul_x2_101,
	mul_x2_102, mul_x2_103, mul_x2_104, mul_x2_105, mul_x2_106, mul_x2_107,
	mul_x2_108, mul_x2_109, mul_x2_110, mul_x2_111, mul_x2_112, mul_x2_113,
	mul_x2_114, mul_x2_115, mul_x2_116, mul_x2_117, mul_x2_118, mul_x2_119,
	mul_x2_120, mul_x2_121, mul_x2_122, mul_x2_123, mul_x2_124, mul_x2_125,
	mul_x2_126, mul_x2_127, mul_x2_128, mul_x2_129, mul_x2_130, mul_x2_131,
	mul_x2_132, mul_x2_133, mul_x2_134, mul_x2_135, mul_x2_136, mul_x2_137,
	mul_x2_138, mul_x2_139, mul_x2_140, mul_x2_141, mul_x2_142, mul_x2_143,
	mul_x2_144, mul_x2_145, mul_x2_146, mul_x2_147, mul_x2_148, mul_x2_149,
	mul_x2_150, mul_x2_151, mul_x2_152, mul_x2_153, mul_x2_154, mul_x2_155,
	mul_x2_156, mul_x2_157, mul_x2_158, mul_x2_159, mul_x2_160, mul_x2_161,
	mul_x2_162, mul_x2_163, mul_x2_164, mul_x2_165, mul_x2_166, mul_x2_167,
	mul_x2_168, mul_x2_169, mul_x2_170, mul_x2_171, mul_x2_172, mul_x2_173,
	mul_x2_174, mul_x2_175, mul_x2_176, mul_x2_177, mul_x2_178, mul_x2_179,
	mul_x2_180, mul_x2_181, mul_x2_182, mul_x2_183, mul_x2_184, mul_x2_185,
	mul_x2_186, mul_x2_187, mul_x2_188, mul_x2_189, mul_x2_190, mul_x2_191,
	mul_x2_192, mul_x2_193, mul_x2_194, mul_x2_195, mul_x2_196, mul_x2_197,
	mul_x2_198, mul_x2_199, mul_x2_200, mul_x2_201, mul_x2_202, mul_x2_203,
	mul_x2_204, mul_x2_205, mul_x2_206, mul_x2_207, mul_x2_208, mul_x2_209,
	mul_x2_210, mul_x2_211, mul_x2_212, mul_x2_213, mul_x2_214, mul_x2_215,
	mul_x2_216, mul_x2_217, mul_x2_218, mul_x2_219, mul_x2_220, mul_x2_221,
	mul_x2_222, mul_x2_223, mul_x2_224, mul_x2_225, mul_x2_226, mul_x2_227,
	mul_x2_228, mul_x2_229, mul_x2_230, mul_x2_231, mul_x2_232, mul_x2_233,
	mul_x2_234, mul_x2_235, mul_x2_236, mul_x2_237, mul_x2_238, mul_x2_239,
	mul_x2_240, mul_x2_241, mul_x2_242, mul_x2_243, mul_x2_244, mul_x2_245,
	mul_x2_246, mul_x2_247, mul_x2_248, mul_x2_249, mul_x2_250, mul_x2_251,
	mul_x2_252, mul_x2_253, mul_x2_254, mul_x2_255
};

#define MUL(c, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		COPY(R_01(r), _mul_x2_in); \
		gf_x2_mul_fns[c](); \
		COPY(_mul_x2_acc, R_01(r)); \
		COPY(R_23(r), _mul_x2_in); \
		gf_x2_mul_fns[c](); \
		COPY(_mul_x2_acc, R_23(r)); \
		break; \
	} \
}
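
/*
 * MUL multiplies four data registers by the constant c: each pair is
 * copied into the fixed _mul_x2_in registers, the prebuilt mul_x2_c
 * routine (looked up in gf_x2_mul_fns) leaves the product in
 * _mul_x2_acc, and the result is copied back, in place of a 256-way
 * branch on c.
 */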

#define raidz_math_begin() kfpu_begin()
#define raidz_math_end() kfpu_end()

#define ZERO_STRIDE 4
#define ZERO_DEFINE() {}
#define ZERO_D 0, 1, 2, 3

#define COPY_STRIDE 4
#define COPY_DEFINE() {}
#define COPY_D 0, 1, 2, 3

#define ADD_STRIDE 4
#define ADD_DEFINE() {}
#define ADD_D 0, 1, 2, 3

#define MUL_STRIDE 4
#define MUL_DEFINE() MUL2_SETUP()
#define MUL_D 0, 1, 2, 3

#define GEN_P_STRIDE 4
#define GEN_P_DEFINE() {}
#define GEN_P_P 0, 1, 2, 3

#define GEN_PQ_STRIDE 4
#define GEN_PQ_DEFINE() {}
#define GEN_PQ_D 0, 1, 2, 3
#define GEN_PQ_C 4, 5, 6, 7

#define GEN_PQR_STRIDE 4
#define GEN_PQR_DEFINE() {}
#define GEN_PQR_D 0, 1, 2, 3
#define GEN_PQR_C 4, 5, 6, 7

#define SYN_Q_DEFINE() {}
#define SYN_Q_D 0, 1, 2, 3
#define SYN_Q_X 4, 5, 6, 7

#define SYN_R_DEFINE() {}
#define SYN_R_D 0, 1, 2, 3
#define SYN_R_X 4, 5, 6, 7

#define SYN_PQ_DEFINE() {}
#define SYN_PQ_D 0, 1, 2, 3
#define SYN_PQ_X 4, 5, 6, 7

#define REC_PQ_STRIDE 4
#define REC_PQ_DEFINE() MUL2_SETUP()
#define REC_PQ_X 0, 1, 2, 3
#define REC_PQ_Y 4, 5, 6, 7
#define REC_PQ_T 8, 9, 10, 11

#define SYN_PR_DEFINE() {}
#define SYN_PR_D 0, 1, 2, 3
#define SYN_PR_X 4, 5, 6, 7

#define REC_PR_STRIDE 4
#define REC_PR_DEFINE() MUL2_SETUP()
#define REC_PR_X 0, 1, 2, 3
#define REC_PR_Y 4, 5, 6, 7
#define REC_PR_T 8, 9, 10, 11

#define SYN_QR_DEFINE() {}
#define SYN_QR_D 0, 1, 2, 3
#define SYN_QR_X 4, 5, 6, 7

#define REC_QR_STRIDE 4
#define REC_QR_DEFINE() MUL2_SETUP()
#define REC_QR_X 0, 1, 2, 3
#define REC_QR_Y 4, 5, 6, 7
#define REC_QR_T 8, 9, 10, 11

#define SYN_PQR_DEFINE() {}
#define SYN_PQR_D 0, 1, 2, 3
#define SYN_PQR_X 4, 5, 6, 7

#define REC_PQR_STRIDE 4
#define REC_PQR_DEFINE() MUL2_SETUP()
#define REC_PQR_X 0, 1, 2, 3
#define REC_PQR_Y 4, 5, 6, 7
#define REC_PQR_Z 8, 9, 10, 11
#define REC_PQR_XS 12, 13, 14, 15
#define REC_PQR_YS 16, 17, 18, 19

#include <sys/vdev_raidz_impl.h>
#include "vdev_raidz_math_impl.h"

DEFINE_GEN_METHODS(avx512f);
DEFINE_REC_METHODS(avx512f);

static boolean_t
raidz_will_avx512f_work(void)
{
	return (kfpu_allowed() && zfs_avx_available() &&
	    zfs_avx2_available() && zfs_avx512f_available());
}

const raidz_impl_ops_t vdev_raidz_avx512f_impl = {
	.gen = RAIDZ_GEN_METHODS(avx512f),
	.rec = RAIDZ_REC_METHODS(avx512f),
	.is_supported = &raidz_will_avx512f_work,
	.name = "avx512f",
};

#endif /* defined(__x86_64) && defined(HAVE_AVX512F) */