// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/lib/memcpy.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This is a reasonably optimized memcpy() routine.
 */

/*
 * Note that the C code is written to be optimized into good assembly. However,
 * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in an
 * explicit compare against 0 (instead of just using the proper "blt reg, xx"
 * or "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually..
 */

#include <linux/types.h>
#include <linux/export.h>

/*
 * This should be done in one go with ldq_u*2/mask/stq_u. Do it
 * with a macro so that we can fix it up later..
 */
#define ALIGN_DEST_TO8_UP(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}

#define ALIGN_DEST_TO8_DN(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}

/*
 * This should similarly be done with ldq_u*2/mask/stq. The destination
 * is aligned, but we don't fill in a full quad-word
 */
#define DO_REST_UP(d,s,n) \
	while (n > 0) { \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}

#define DO_REST_DN(d,s,n) \
	while (n > 0) { \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}

/*
 * This should be done with ldq/mask/stq. The source and destination are
 * aligned, but we don't fill in a full quad-word
 */
#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)
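
/*
 * The routines below all share one shape: an ALIGN_DEST_TO8_* byte loop runs
 * until the destination is 8-byte aligned, a main loop then moves whole
 * quad-words, and a DO_REST_* byte loop mops up the 0..7 tail bytes.  The
 * "ldq_u*2/mask/stq_u" remarks above describe the quad-word sequence that
 * could one day replace these byte loops (ldq_u the two straddling
 * quad-words, mask and insert the partial bytes, stq_u them back); that
 * replacement has not been written here, so the simple byte loops stand in
 * for it.
 */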

/*
 * This does unaligned memory copies. We want to avoid storing to
 * an unaligned address, as that would do a read-modify-write cycle.
 * We also want to avoid double-reading the unaligned reads.
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
					  long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;			/* to avoid compare against 8 in the loop */
	if (n >= 0) {
		unsigned long low_word, high_word;

		__asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s));
		do {
			unsigned long tmp;
			__asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8)));
			n -= 8;
			__asm__("extql %1,%2,%0"
				:"=r" (low_word)
				:"r" (low_word), "r" (s));
			__asm__("extqh %1,%2,%0"
				:"=r" (tmp)
				:"r" (high_word), "r" (s));
			s += 8;
			*(unsigned long *) d = low_word | tmp;
			d += 8;
			low_word = high_word;
		} while (n >= 0);
	}
	n += 8;
	DO_REST_UP(d,s,n);
}
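
/*
 * How the merge in __memcpy_unaligned_up() works, roughly: for the source
 * byte offset k = s & 7 (always 1..7 here, since this path is only taken
 * when src and dest disagree mod 8 and the head loop has just aligned d),
 * extql shifts the first quad-word right by 8*k bits and extqh shifts the
 * second one left by 8*(8-k) bits, so the store amounts to
 *
 *	*(unsigned long *) d = (low_word >> 8*k) | (high_word << 8*(8-k));
 *
 * producing one aligned quad-word per iteration, while recycling high_word
 * as the next low_word keeps each source quad-word from being loaded twice.
 */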

static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
					  long n)
{
	/* I don't understand AXP assembler well enough for this. -Tim */
	s += n;
	d += n;
	while (n--)
		* (char *) --d = * (char *) --s;
}

/*
 * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register
 * for the load-store. I don't know why, but it would seem that using a floating
 * point register for the move seems to slow things down (very small difference,
 * but measurable).
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
					long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		s += 8;
		*(unsigned long *) d = tmp;
		d += 8;
	}
	n += 8;
	DO_REST_ALIGNED_UP(d,s,n);
}

static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
					long n)
{
	s += n;
	d += n;
	ALIGN_DEST_TO8_DN(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		s -= 8;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		d -= 8;
		*(unsigned long *) d = tmp;
	}
	n += 8;
	DO_REST_ALIGNED_DN(d,s,n);
}
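
/*
 * Note that the *_dn variants copy from the top of the buffers downwards
 * (both pointers are first advanced by n), which is the overlap-safe
 * direction when the destination lies above the source; memcpy() below
 * only ever uses the *_up variants.
 */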

void * memcpy(void * dest, const void *src, size_t n)
{
	if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
		__memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
				     n);
		return dest;
	}
	__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
	return dest;
}
EXPORT_SYMBOL(memcpy);