/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2007 John Williams
 *
 * Reasonably optimised generic C-code for memmove on Microblaze
 * This is generic C code to do efficient, alignment-aware memmove.
 *
 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
 *
 * Attempts were made, unsuccessfully, to contact the original
 * author of this code (Michael Morrow, Intel). Below is the original
 * copyright notice.
 *
 * This software has been developed by Intel Corporation.
 * Intel specifically disclaims all warranties, express or
 * implied, and all liability, including consequential and
 * other indirect damages, for the use of this program, including
 * liability for infringement of any proprietary rights,
 * and including the warranties of merchantability and fitness
 * for a particular purpose. Intel does not assume any
 * responsibility for any errors which may appear in this program
 * nor any responsibility to update it.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMMOVE
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;

#ifdef CONFIG_OPT_LIB_FUNCTION
	const uint32_t *i_src;
	uint32_t *i_dst;
#endif

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);
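
	/*
	 * When dst <= src, an ascending copy never overwrites a source byte
	 * that has not been read yet, so the overlap-unaware memcpy() above
	 * is safe; only the dst > src case needs the descending copy below.
	 */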

#ifndef CONFIG_OPT_LIB_FUNCTION
	/* copy backwards, from end to beginning */
	src += c;
	dst += c;

	/* Simple, byte oriented memmove. */
	while (c--)
		*--dst = *--src;

	return v_dst;
#else
	/* The following code tries to optimize the copy by using unsigned
	 * alignment. This will work fine if both source and destination are
	 * aligned on the same boundary. However, if they are aligned on
	 * different boundaries shifts will be necessary. This might result in
	 * bad performance on MicroBlaze systems without a barrel shifter.
	 */
	/* FIXME this part needs more testing */
	/* Do a descending copy - this is a bit trickier! */
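	/*
	 * Sketch of the scheme used below (the shift amounts assume the
	 * classic big-endian MicroBlaze byte order): when src is not word
	 * aligned, each 32-bit store is spliced together from two
	 * neighbouring source words, with buf_hold carrying the bytes of
	 * the previously loaded, higher-addressed word that still belong to
	 * the next destination word. Only whole words are loaded and
	 * stored; the shifts do the re-alignment.
	 */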
	dst += c;
	src += c;

	if (c >= 4) {
		unsigned value, buf_hold;

		/* Align the destination to a word boundary. */
		/* This is done in an endian independent manner. */

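		/*
		 * The cases below fall through on purpose: one trailing byte
		 * is copied per case until dst reaches a word boundary, with
		 * c decremented accordingly.
		 */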
		switch ((unsigned long)dst & 3) {
		case 3:
			*--dst = *--src;
			--c;
		case 2:
			*--dst = *--src;
			--c;
		case 1:
			*--dst = *--src;
			--c;
		}

		i_dst = (void *)dst;
		/* Choose a copy scheme based on the source */
		/* alignment relative to destination. */
		switch ((unsigned long)src & 3) {
		case 0x0:	/* Both byte offsets are aligned */

			i_src = (const void *)src;

			for (; c >= 4; c -= 4)
				*--i_dst = *--i_src;

			src = (const void *)i_src;
			break;
		case 0x1:	/* Unaligned - Off by 1 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);

			/* Load the holding buffer */
			buf_hold = *--i_src >> 24;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = value << 8 | buf_hold;
				buf_hold = value >> 24;
			}
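
			/*
			 * Worked example (assuming big-endian words, as the
			 * shift amounts imply): if the word just above the
			 * copy point holds 0x08090A0B and the next word down
			 * holds 0x04050607, buf_hold starts as 0x08 and the
			 * first store writes 0x04050607 << 8 | 0x08, i.e.
			 * 0x05060708, the four highest source bytes still to
			 * be copied.
			 */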
			/* Realign the source */
			src = (const void *)i_src;
			src += 1;
			break;
		case 0x2:	/* Unaligned - Off by 2 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);

			/* Load the holding buffer */
			buf_hold = *--i_src >> 16;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = value << 16 | buf_hold;
				buf_hold = value >> 16;
			}
			/* Realign the source */
			src = (const void *)i_src;
			src += 2;
			break;
		case 0x3:	/* Unaligned - Off by 3 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);

			/* Load the holding buffer */
			buf_hold = *--i_src >> 8;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = value << 24 | buf_hold;
				buf_hold = value >> 8;
			}
			/* Realign the source */
			src = (const void *)i_src;
			src += 3;
			break;
		}

		dst = (void *)i_dst;
	}

	/* simple fast copy, ... unless a cache boundary is crossed */
	/* Finish off any remaining bytes */
	for (; c > 0; c--)
		*--dst = *--src;

	return v_dst;
#endif /* CONFIG_OPT_LIB_FUNCTION */
}

EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */