/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
 */
#ifndef _ASM_LOONGARCH_XOR_H
#define _ASM_LOONGARCH_XOR_H

#include <asm/cpu-features.h>
#include <asm/xor_simd.h>
#ifdef CONFIG_CPU_HAS_LSX
static struct xor_block_template xor_block_lsx = {
	.name = "lsx",
	.do_2 = xor_lsx_2,
	.do_3 = xor_lsx_3,
	.do_4 = xor_lsx_4,
	.do_5 = xor_lsx_5,
};

#define XOR_SPEED_LSX()					\
	do {						\
		if (cpu_has_lsx)			\
			xor_speed(&xor_block_lsx);	\
	} while (0)
#else /* CONFIG_CPU_HAS_LSX */
#define XOR_SPEED_LSX()
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LASX
static struct xor_block_template xor_block_lasx = {
	.name = "lasx",
	.do_2 = xor_lasx_2,
	.do_3 = xor_lasx_3,
	.do_4 = xor_lasx_4,
	.do_5 = xor_lasx_5,
};

#define XOR_SPEED_LASX()				\
	do {						\
		if (cpu_has_lasx)			\
			xor_speed(&xor_block_lasx);	\
	} while (0)
#else /* CONFIG_CPU_HAS_LASX */
#define XOR_SPEED_LASX()
#endif /* CONFIG_CPU_HAS_LASX */
/*
 * For grins, also test the generic routines.
 *
 * More importantly: it cannot be ruled out at this point that some future
 * (maybe reduced) models could run the vector algorithms slower than the
 * scalar ones, maybe for errata or micro-op reasons. It may be appropriate
 * to revisit this after one or two more uarch generations.
 */
#include <asm-generic/xor.h>
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
do {							\
	xor_speed(&xor_block_8regs);			\
	xor_speed(&xor_block_8regs_p);			\
	xor_speed(&xor_block_32regs);			\
	xor_speed(&xor_block_32regs_p);			\
	XOR_SPEED_LSX();				\
	XOR_SPEED_LASX();				\
} while (0)
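/*
 * Rough sketch of how the templates above are consumed (see crypto/xor.c
 * for the authoritative version): calibrate_xor_blocks() expands
 * XOR_TRY_TEMPLATES at boot, each xor_speed() invocation benchmarks one
 * template, and the fastest template is kept as the active one used by
 * xor_blocks(). Approximately:
 *
 *	static int __init calibrate_xor_blocks(void)
 *	{
 *		...
 *		XOR_TRY_TEMPLATES;	// scalar + LSX/LASX candidates
 *		active_template = fastest;
 *		...
 *	}
 */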
#endif /* _ASM_LOONGARCH_XOR_H */