/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include <linux/kernel.h>
26 #include <asm/fpu/api.h>
28 #include "i915_memcpy.h"
30 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
31 #define CI_BUG_ON(expr) BUG_ON(expr)
33 #define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
36 static DEFINE_STATIC_KEY_FALSE(has_movntdqa
);
38 static void __memcpy_ntdqa(void *dst
, const void *src
, unsigned long len
)
43 asm("movntdqa (%0), %%xmm0\n"
44 "movntdqa 16(%0), %%xmm1\n"
45 "movntdqa 32(%0), %%xmm2\n"
46 "movntdqa 48(%0), %%xmm3\n"
47 "movaps %%xmm0, (%1)\n"
48 "movaps %%xmm1, 16(%1)\n"
49 "movaps %%xmm2, 32(%1)\n"
50 "movaps %%xmm3, 48(%1)\n"
51 :: "r" (src
), "r" (dst
) : "memory");
57 asm("movntdqa (%0), %%xmm0\n"
58 "movaps %%xmm0, (%1)\n"
59 :: "r" (src
), "r" (dst
) : "memory");
67 static void __memcpy_ntdqu(void *dst
, const void *src
, unsigned long len
)
72 asm("movntdqa (%0), %%xmm0\n"
73 "movntdqa 16(%0), %%xmm1\n"
74 "movntdqa 32(%0), %%xmm2\n"
75 "movntdqa 48(%0), %%xmm3\n"
76 "movups %%xmm0, (%1)\n"
77 "movups %%xmm1, 16(%1)\n"
78 "movups %%xmm2, 32(%1)\n"
79 "movups %%xmm3, 48(%1)\n"
80 :: "r" (src
), "r" (dst
) : "memory");
86 asm("movntdqa (%0), %%xmm0\n"
87 "movups %%xmm0, (%1)\n"
88 :: "r" (src
), "r" (dst
) : "memory");
97 * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
98 * @dst: destination pointer
99 * @src: source pointer
100 * @len: how many bytes to copy
102 * i915_memcpy_from_wc copies @len bytes from @src to @dst using
103 * non-temporal instructions where available. Note that all arguments
104 * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
107 * To test whether accelerated reads from WC are supported, use
108 * i915_memcpy_from_wc(NULL, NULL, 0);
110 * Returns true if the copy was successful, false if the preconditions
113 bool i915_memcpy_from_wc(void *dst
, const void *src
, unsigned long len
)
115 if (unlikely(((unsigned long)dst
| (unsigned long)src
| len
) & 15))
118 if (static_branch_likely(&has_movntdqa
)) {
120 __memcpy_ntdqa(dst
, src
, len
>> 4);
/**
 * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
 * @dst: destination pointer
 * @src: source pointer
 * @len: how many bytes to copy
 *
 * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
 * @src to @dst using non-temporal instructions where available, but
 * accepts that its arguments may not be aligned, but are valid for the
 * potential 16-byte read past the end.
 */
void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
{
	unsigned long addr;

	/* Callers must have checked i915_has_memcpy_from_wc() first. */
	CI_BUG_ON(!i915_has_memcpy_from_wc());

	addr = (unsigned long)src;
	if (!IS_ALIGNED(addr, 16)) {
		/* Copy the unaligned head with a plain memcpy until @src
		 * reaches the next 16-byte boundary (or @len is exhausted).
		 */
		unsigned long x = min(ALIGN(addr, 16) - addr, len);

		memcpy(dst, src, x);

		len -= x;
		dst += x;
		src += x;
	}

	/* Rounding up may read up to 15 bytes past @src + @len, which the
	 * contract above permits; the unaligned stores keep @dst safe.
	 */
	if (likely(len))
		__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
}
159 void i915_memcpy_init_early(struct drm_i915_private
*dev_priv
)
162 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
163 * emulation. So don't enable movntdqa in hypervisor guest.
165 if (static_cpu_has(X86_FEATURE_XMM4_1
) &&
166 !boot_cpu_has(X86_FEATURE_HYPERVISOR
))
167 static_branch_enable(&has_movntdqa
);