/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
/* Must be 8 bytes in size. */
#define op_t uint64_t
/* Threshold value for when to enter the unrolled loops. */
#define OP_T_THRES 16
#if CHIP_L2_LINE_SIZE() != 64
#error "Assumes 64 byte line size"
#endif
/* How many cache lines ahead should we prefetch? */
#define PREFETCH_LINES_AHEAD 4
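/*
 * A rough sizing sketch: with 64-byte lines, four lines ahead keeps
 * about 256 bytes of source in flight, which is intended to hide the
 * L2 miss latency while the unrolled loops below consume one line per
 * iteration.
 */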
/*
 * Provide "base versions" of load and store for the normal code path.
 * The kernel provides other versions for userspace copies.
 */
#define ST(p, v) (*(p) = (v))
#define LD(p) (*(p))
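/*
 * Sketch of the two build modes: compiled directly, the LDn/STn
 * macros below collapse to plain dereferences and the function is
 * memcpy().  When a wrapper #includes this file with USERCOPY_FUNC
 * defined, it is expected to supply its own LDn/STn macros built on
 * fault-fixup loads and stores, reusing the same body for the
 * user-copy routines.
 */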
#ifndef USERCOPY_FUNC
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#define RETVAL dstv
void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
#else
/*
 * Special kernel version will provide implementation of the LDn/STn
 * macros to return a count of uncopied bytes due to mm fault.
 */
#define RETVAL 0
int __attribute__((optimize("omit-frame-pointer")))
USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
#endif
{
	char *__restrict dst1 = (char *)dstv;
	const char *__restrict src1 = (const char *)srcv;
	const char *__restrict src1_end;
	const char *__restrict prefetch;
	op_t *__restrict dst8;	/* 8-byte pointer to destination memory. */
	op_t final;	/* Final bytes to write to trailing word, if any */
	long i;
	/* Too short for the unrolled paths: copy byte by byte. */
	if (n < OP_T_THRES) {
		for (; n; n--)
			ST1(dst1++, LD1(src1++));
		return RETVAL;
	}
	/*
	 * Locate the end of source memory we will copy.  Don't
	 * prefetch past this.
	 */
	src1_end = src1 + n - 1;
	/* Prefetch ahead a few cache lines, but not past the end. */
	prefetch = src1;
	for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
		__insn_prefetch(prefetch);
		prefetch += CHIP_L2_LINE_SIZE();
		prefetch = (prefetch < src1_end) ? prefetch : src1;
	}
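	/*
	 * Note the clamp above: rather than walking past src1_end, the
	 * prefetch pointer wraps back to src1, so the extra startup
	 * iterations merely re-request a line that is already on its
	 * way instead of pulling in lines the copy will never read.
	 */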
	/* Copy bytes until dst is word-aligned. */
	for (; (uintptr_t)dst1 & (sizeof(op_t) - 1); n--)
		ST1(dst1++, LD1(src1++));
	/* 8-byte pointer to destination memory. */
	dst8 = (op_t *)dst1;
	if (__builtin_expect((uintptr_t)src1 & (sizeof(op_t) - 1), 0)) {
		/* Unaligned copy. */

		op_t tmp0 = 0, tmp1 = 0, tmp2, tmp3;
		const op_t *src8 = (const op_t *) ((uintptr_t)src1 &
						   -sizeof(op_t));
		const void *srci = (void *)src1;
		int m;

		m = (CHIP_L2_LINE_SIZE() << 2) -
			(((uintptr_t)dst8) & ((CHIP_L2_LINE_SIZE() << 2) - 1));
		m = (n < m) ? n : m;
		m /= sizeof(op_t);
		/* Copy until 'dst' is cache-line-aligned. */
		n -= (sizeof(op_t) * m);
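		/*
		 * Worked example (hypothetical address, 64-byte lines):
		 * the alignment block is CHIP_L2_LINE_SIZE() << 2 = 256
		 * bytes.  If dst8 == 0x1050, then 0x1050 & 255 == 80,
		 * so m = 256 - 80 = 176 bytes = 22 words to the next
		 * 256-byte boundary, clamped to n if the copy is shorter.
		 */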
		/*
		 * Copy until 'dst' is cache-line-aligned, splicing each
		 * destination word out of the two aligned source words
		 * that straddle it.  dblalign does the byte shifting,
		 * taking the shift amount from the low bits of srci.
		 */
		tmp0 = LD8(src8);
		for (; m; m--) {
			tmp1 = LD8(src8 + 1);
			tmp2 = __insn_dblalign(tmp0, tmp1, srci);
			ST8(dst8++, tmp2);
			tmp0 = tmp1;
			src8++;
		}
		/* src8 now points at the carry word for the next phase. */
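		/*
		 * dblalign sketch, assuming a source offset of 3 on this
		 * little-endian machine: each spliced word is
		 * (prev >> 24) | (next << 40), i.e. the top five bytes
		 * of the previous aligned word followed by the low three
		 * bytes of the next one.
		 */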
		if (__builtin_expect(n >= CHIP_L2_LINE_SIZE(), 0)) {
			/* Copy whole cache lines at a time. */
			op_t tmp4, tmp5, tmp6, tmp7, tmp8;

			prefetch = ((const char *)src8) +
				CHIP_L2_LINE_SIZE() * PREFETCH_LINES_AHEAD;

			for (tmp0 = LD8(src8++); n >= CHIP_L2_LINE_SIZE();
			     n -= CHIP_L2_LINE_SIZE()) {
				/* Prefetch and advance to next line to
				   prefetch, but don't go past the end. */
				__insn_prefetch(prefetch);

				/* Make sure prefetch got scheduled
				   earlier. */
				__asm__ ("" : : : "memory");

				prefetch += CHIP_L2_LINE_SIZE();
				prefetch = (prefetch < src1_end) ? prefetch :
					(const char *)src8;

				/* Read one line of source, following the
				   carry word loaded above. */
				tmp1 = LD8(src8++);
				tmp2 = LD8(src8++);
				tmp3 = LD8(src8++);
				tmp4 = LD8(src8++);
				tmp5 = LD8(src8++);
				tmp6 = LD8(src8++);
				tmp7 = LD8(src8++);
				tmp8 = LD8(src8++);

				/* Merge each output word from the pair of
				   source words that straddle it. */
				tmp0 = __insn_dblalign(tmp0, tmp1, srci);
				tmp1 = __insn_dblalign(tmp1, tmp2, srci);
				tmp2 = __insn_dblalign(tmp2, tmp3, srci);
				tmp3 = __insn_dblalign(tmp3, tmp4, srci);
				tmp4 = __insn_dblalign(tmp4, tmp5, srci);
				tmp5 = __insn_dblalign(tmp5, tmp6, srci);
				tmp6 = __insn_dblalign(tmp6, tmp7, srci);
				tmp7 = __insn_dblalign(tmp7, tmp8, srci);

				ST8(dst8++, tmp0);
				ST8(dst8++, tmp1);
				ST8(dst8++, tmp2);
				ST8(dst8++, tmp3);
				ST8(dst8++, tmp4);
				ST8(dst8++, tmp5);
				ST8(dst8++, tmp6);
				ST8(dst8++, tmp7);

				/* The last word read carries into the
				   next iteration. */
				tmp0 = tmp8;
			}

			/* Back up so the next phase reloads the carry
			   word from memory. */
			src8--;
		}
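		/*
		 * The rotation above is a small software pipeline: nine
		 * live registers hold one line's worth of loads plus the
		 * carry word, so each dblalign already has both inputs
		 * in registers and loads, merges and stores can issue
		 * back to back.
		 */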
		/* Copy the rest 8-byte chunks. */
		if (n >= sizeof(op_t)) {
			tmp0 = LD8(src8++);
			for (; n >= sizeof(op_t); n -= sizeof(op_t)) {
				tmp1 = LD8(src8++);
				tmp0 = __insn_dblalign(tmp0, tmp1, srci);
				ST8(dst8++, tmp0);
				tmp0 = tmp1;
			}
			src8--;
		}

		if (__builtin_expect(n == 0, 1))
			return RETVAL;

		/*
		 * Splice out the final partial word, being careful not
		 * to read an aligned word that starts past the end of
		 * the source.
		 */
		tmp0 = LD8(src8++);
		tmp1 = ((const char *)src8 <= src1_end)
			? LD8((op_t *)src8) : 0;
		final = __insn_dblalign(tmp0, tmp1, srci);
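		/*
		 * Boundary example (hypothetical values): with a 3-byte
		 * source offset and n == 5 left, the five trailing bytes
		 * live entirely in the carry word, and the word at src8
		 * would start past src1_end -- so it is replaced by 0
		 * rather than read, avoiding a load beyond the buffer.
		 */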
	} else {
		/* Aligned copy. */

		const op_t *__restrict src8 = (const op_t *)src1;

		/* src8 and dst8 are both word-aligned. */
		if (n >= CHIP_L2_LINE_SIZE()) {
			/* Copy until 'dst' is cache-line-aligned. */
			for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
			     n -= sizeof(op_t))
				ST8(dst8++, LD8(src8++));
			for (; n >= CHIP_L2_LINE_SIZE(); ) {
				op_t tmp0, tmp1, tmp2, tmp3;
				op_t tmp4, tmp5, tmp6, tmp7;

				/*
				 * Prefetch and advance to next line
				 * to prefetch, but don't go past the
				 * end.
				 */
				__insn_prefetch(prefetch);

				/* Make sure prefetch got scheduled
				   earlier. */
				__asm__ ("" : : : "memory");

				prefetch += CHIP_L2_LINE_SIZE();
				prefetch = (prefetch < src1_end) ? prefetch :
					(const char *)src8;

				/*
				 * Do all the loads before wh64.  This
				 * is necessary if [src8, src8+7] and
				 * [dst8, dst8+7] share the same cache
				 * line and dst8 <= src8, as can be
				 * the case when called from memmove,
				 * or with code tested on x86 whose
				 * memcpy always works with forward
				 * copies.
				 */
				tmp0 = LD8(src8++);
				tmp1 = LD8(src8++);
				tmp2 = LD8(src8++);
				tmp3 = LD8(src8++);
				tmp4 = LD8(src8++);
				tmp5 = LD8(src8++);
				tmp6 = LD8(src8++);
				tmp7 = LD8(src8++);

				/* wh64 and wait for tmp7 load completion. */
				__asm__ ("move %0, %0; wh64 %1\n"
					 : : "r"(tmp7), "r"(dst8));
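				/*
				 * wh64 is tile's "write hint": it claims
				 * the 64-byte destination line outright,
				 * so the eight stores below can fill it
				 * without fetching its old contents.
				 * The dummy "move %0, %0" makes the hint
				 * depend on tmp7, i.e. it cannot issue
				 * until the last load has completed.
				 */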
				ST8(dst8++, tmp0);
				ST8(dst8++, tmp1);
				ST8(dst8++, tmp2);
				ST8(dst8++, tmp3);
				ST8(dst8++, tmp4);
				ST8(dst8++, tmp5);
				ST8(dst8++, tmp6);
				ST8(dst8++, tmp7);

				n -= CHIP_L2_LINE_SIZE();
			}
#if CHIP_L2_LINE_SIZE() != 64
# error "Fix code that assumes particular L2 cache line size."
#endif
		}

		for (; n >= sizeof(op_t); n -= sizeof(op_t))
			ST8(dst8++, LD8(src8++));

		if (__builtin_expect(n == 0, 1))
			return RETVAL;

		/* The trailing bytes come from the word at src8. */
		final = LD8(src8);
	}
	/* n != 0 if we get here.  Write out any trailing bytes. */
	dst1 = (char *)dst8;
#ifndef __BIG_ENDIAN__
	if (n & 4) {
		ST4((uint32_t *)dst1, final);
		dst1 += 4;
		final >>= 32;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final);
		dst1 += 2;
		final >>= 16;
	}
	if (n & 1)
		ST1((uint8_t *)dst1, final);
#else
	/* Big endian: the first bytes in memory are the high bytes. */
	if (n & 4) {
		ST4((uint32_t *)dst1, final >> 32);
		dst1 += 4;
	} else {
		final >>= 32;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final >> 16);
		dst1 += 2;
	} else {
		final >>= 16;
	}
	if (n & 1)
		ST1((uint8_t *)dst1, final >> 8);
#endif

	return RETVAL;
}
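/*
 * Trailing-store walkthrough (little-endian, hypothetical values):
 * with n == 3 and the low bytes of final == 0x55AA11, the ST2 writes
 * 0x11 then 0xAA, final >>= 16 exposes 0x55, and the ST1 writes it --
 * three bytes in address order, never storing past the requested
 * length.
 */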