Restore clearing of L2CACHE flag in arc_read_done()
[zfs.git] / module / zcommon / zfs_fletcher_avx512.c
blob300ec4c1fb69e018fc31a471147ae25395f57204
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
25 #if defined(__x86_64) && defined(HAVE_AVX512F)
27 #include <sys/byteorder.h>
28 #include <sys/frame.h>
29 #include <sys/spa_checksum.h>
30 #include <sys/strings.h>
31 #include <sys/simd.h>
32 #include <zfs_fletcher.h>
34 #ifdef __linux__
35 #define __asm __asm__ __volatile__
36 #endif
38 static void
39 fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
41 bzero(ctx->avx512, 4 * sizeof (zfs_fletcher_avx512_t));
44 static void
45 fletcher_4_avx512f_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
47 static const uint64_t
48 CcA[] = { 0, 0, 1, 3, 6, 10, 15, 21 },
49 CcB[] = { 28, 36, 44, 52, 60, 68, 76, 84 },
50 DcA[] = { 0, 0, 0, 1, 4, 10, 20, 35 },
51 DcB[] = { 56, 84, 120, 164, 216, 276, 344, 420 },
52 DcC[] = { 448, 512, 576, 640, 704, 768, 832, 896 };
54 uint64_t A, B, C, D;
55 uint64_t i;
57 A = ctx->avx512[0].v[0];
58 B = 8 * ctx->avx512[1].v[0];
59 C = 64 * ctx->avx512[2].v[0] - CcB[0] * ctx->avx512[1].v[0];
60 D = 512 * ctx->avx512[3].v[0] - DcC[0] * ctx->avx512[2].v[0] +
61 DcB[0] * ctx->avx512[1].v[0];
63 for (i = 1; i < 8; i++) {
64 A += ctx->avx512[0].v[i];
65 B += 8 * ctx->avx512[1].v[i] - i * ctx->avx512[0].v[i];
66 C += 64 * ctx->avx512[2].v[i] - CcB[i] * ctx->avx512[1].v[i] +
67 CcA[i] * ctx->avx512[0].v[i];
68 D += 512 * ctx->avx512[3].v[i] - DcC[i] * ctx->avx512[2].v[i] +
69 DcB[i] * ctx->avx512[1].v[i] - DcA[i] * ctx->avx512[0].v[i];
72 ZIO_SET_CHECKSUM(zcp, A, B, C, D);
/* Load the four saved accumulator vectors from memory into zmm0-zmm3. */
#define	FLETCHER_4_AVX512_RESTORE_CTX(ctx)				\
{									\
	__asm("vmovdqu64 %0, %%zmm0" :: "m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %0, %%zmm1" :: "m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %0, %%zmm2" :: "m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %0, %%zmm3" :: "m" ((ctx)->avx512[3]));	\
}
/* Spill the four accumulator vectors zmm0-zmm3 back into the context. */
#define	FLETCHER_4_AVX512_SAVE_CTX(ctx)					\
{									\
	__asm("vmovdqu64 %%zmm0, %0" : "=m" ((ctx)->avx512[0]));	\
	__asm("vmovdqu64 %%zmm1, %0" : "=m" ((ctx)->avx512[1]));	\
	__asm("vmovdqu64 %%zmm2, %0" : "=m" ((ctx)->avx512[2]));	\
	__asm("vmovdqu64 %%zmm3, %0" : "=m" ((ctx)->avx512[3]));	\
}
91 static void
92 fletcher_4_avx512f_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
94 const uint32_t *ip = buf;
95 const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
97 kfpu_begin();
99 FLETCHER_4_AVX512_RESTORE_CTX(ctx);
101 for (; ip < ipend; ip += 8) {
102 __asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
103 __asm("vpaddq %zmm4, %zmm0, %zmm0");
104 __asm("vpaddq %zmm0, %zmm1, %zmm1");
105 __asm("vpaddq %zmm1, %zmm2, %zmm2");
106 __asm("vpaddq %zmm2, %zmm3, %zmm3");
109 FLETCHER_4_AVX512_SAVE_CTX(ctx);
111 kfpu_end();
113 STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_native);
115 static void
116 fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
117 uint64_t size)
119 static const uint64_t byteswap_mask = 0xFFULL;
120 const uint32_t *ip = buf;
121 const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
123 kfpu_begin();
125 FLETCHER_4_AVX512_RESTORE_CTX(ctx);
127 __asm("vpbroadcastq %0, %%zmm8" :: "r" (byteswap_mask));
128 __asm("vpsllq $8, %zmm8, %zmm9");
129 __asm("vpsllq $16, %zmm8, %zmm10");
130 __asm("vpsllq $24, %zmm8, %zmm11");
132 for (; ip < ipend; ip += 8) {
133 __asm("vpmovzxdq %0, %%zmm5"::"m" (*ip));
135 __asm("vpsrlq $24, %zmm5, %zmm6");
136 __asm("vpandd %zmm8, %zmm6, %zmm6");
137 __asm("vpsrlq $8, %zmm5, %zmm7");
138 __asm("vpandd %zmm9, %zmm7, %zmm7");
139 __asm("vpord %zmm6, %zmm7, %zmm4");
140 __asm("vpsllq $8, %zmm5, %zmm6");
141 __asm("vpandd %zmm10, %zmm6, %zmm6");
142 __asm("vpord %zmm6, %zmm4, %zmm4");
143 __asm("vpsllq $24, %zmm5, %zmm5");
144 __asm("vpandd %zmm11, %zmm5, %zmm5");
145 __asm("vpord %zmm5, %zmm4, %zmm4");
147 __asm("vpaddq %zmm4, %zmm0, %zmm0");
148 __asm("vpaddq %zmm0, %zmm1, %zmm1");
149 __asm("vpaddq %zmm1, %zmm2, %zmm2");
150 __asm("vpaddq %zmm2, %zmm3, %zmm3");
153 FLETCHER_4_AVX512_SAVE_CTX(ctx)
155 kfpu_end();
157 STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_byteswap);
159 static boolean_t
160 fletcher_4_avx512f_valid(void)
162 return (kfpu_allowed() && zfs_avx512f_available());
165 const fletcher_4_ops_t fletcher_4_avx512f_ops = {
166 .init_native = fletcher_4_avx512f_init,
167 .fini_native = fletcher_4_avx512f_fini,
168 .compute_native = fletcher_4_avx512f_native,
169 .init_byteswap = fletcher_4_avx512f_init,
170 .fini_byteswap = fletcher_4_avx512f_fini,
171 .compute_byteswap = fletcher_4_avx512f_byteswap,
172 .valid = fletcher_4_avx512f_valid,
173 .name = "avx512f"
176 #if defined(HAVE_AVX512BW)
177 static void
178 fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
179 uint64_t size)
181 static const zfs_fletcher_avx512_t mask = {
182 .v = { 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
183 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
184 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
185 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B }
187 const uint32_t *ip = buf;
188 const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
190 kfpu_begin();
192 FLETCHER_4_AVX512_RESTORE_CTX(ctx);
194 __asm("vmovdqu64 %0, %%zmm5" :: "m" (mask));
196 for (; ip < ipend; ip += 8) {
197 __asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
199 __asm("vpshufb %zmm5, %zmm4, %zmm4");
201 __asm("vpaddq %zmm4, %zmm0, %zmm0");
202 __asm("vpaddq %zmm0, %zmm1, %zmm1");
203 __asm("vpaddq %zmm1, %zmm2, %zmm2");
204 __asm("vpaddq %zmm2, %zmm3, %zmm3");
207 FLETCHER_4_AVX512_SAVE_CTX(ctx)
209 kfpu_end();
211 STACK_FRAME_NON_STANDARD(fletcher_4_avx512bw_byteswap);
213 const fletcher_4_ops_t fletcher_4_avx512bw_ops = {
214 .init_native = fletcher_4_avx512f_init,
215 .fini_native = fletcher_4_avx512f_fini,
216 .compute_native = fletcher_4_avx512f_native,
217 .init_byteswap = fletcher_4_avx512f_init,
218 .fini_byteswap = fletcher_4_avx512f_fini,
219 .compute_byteswap = fletcher_4_avx512bw_byteswap,
220 .valid = fletcher_4_avx512f_valid,
221 .name = "avx512bw"
223 #endif
225 #endif /* defined(__x86_64) && defined(HAVE_AVX512F) */