/* arch/microblaze/lib/memmove.c */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2007 John Williams
 *
 * Reasonably optimised generic C code to do efficient, alignment-aware
 * memmove on MicroBlaze.
 *
 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
 *
 * Attempts were made, unsuccessfully, to contact the original
 * author of this code (Michael Morrow, Intel). Below is the original
 * copyright notice.
 *
 * This software has been developed by Intel Corporation.
 * Intel specifically disclaims all warranties, express or
 * implied, and all liability, including consequential and
 * other indirect damages, for the use of this program, including
 * liability for infringement of any proprietary rights,
 * and including the warranties of merchantability and fitness
 * for a particular purpose. Intel does not assume any
 * responsibility for any errors which may appear in this program
 * nor any responsibility to update it.
 */
#include <linux/export.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMMOVE
#ifndef CONFIG_OPT_LIB_FUNCTION
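/*
 * Simple byte-wise memmove, used when the optimised library routines
 * (CONFIG_OPT_LIB_FUNCTION) are not selected.
 */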
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
        const char *src = v_src;
        char *dst = v_dst;

        if (!c)
                return v_dst;

        /* Use memcpy (a forward copy) when the destination starts at or
         * below the source, so overlapping bytes are read before they are
         * overwritten.
         */
        if (v_dst <= v_src)
                return memcpy(v_dst, v_src, c);

        /* Copy backwards, from end to beginning */
        src += c;
        dst += c;

        /* Simple, byte oriented memmove. */
        while (c--)
                *--dst = *--src;

        return v_dst;
}
#else /* CONFIG_OPT_LIB_FUNCTION */
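/*
 * Word-optimised memmove: align the destination to a word boundary, then
 * move whole 32-bit words, shifting data across word boundaries when the
 * source and destination have different alignments.
 */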
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
        const char *src = v_src;
        char *dst = v_dst;
        const uint32_t *i_src;
        uint32_t *i_dst;

        if (!c)
                return v_dst;

        /* Use memcpy (a forward copy) when the destination starts at or
         * below the source.
         */
        if (v_dst <= v_src)
                return memcpy(v_dst, v_src, c);
        /* The following code tries to optimize the copy by using word
         * alignment. This will work fine if both source and destination are
         * aligned on the same boundary. However, if they are aligned on
         * different boundaries shifts will be necessary. This might result in
         * bad performance on MicroBlaze systems without a barrel shifter.
         */
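        /*
         * Illustration (big endian, source one byte off word alignment, as
         * in case 0x1 below): with source words ... [A0 A1 A2 A3] [A4 A5 A6 A7]
         * and the byte A8 carried over in buf_hold, the next aligned
         * destination word is built as
         *
         *      [A5 A6 A7 A8] = ([A4 A5 A6 A7] << 8) | A8
         *
         * i.e. one load, one shift and one OR per word instead of four byte
         * copies. The little-endian variants mirror this with the shifts in
         * the opposite direction.
         */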
        /* FIXME this part needs more test */
        /* Do a descending copy - this is a bit trickier! */
        dst += c;
        src += c;
        if (c >= 4) {
                unsigned value, buf_hold;

                /* Align the destination to a word boundary. */
                /* This is done in an endian independent manner. */
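                /*
                 * The switch below copies (dst & 3) bytes one at a time and
                 * intentionally falls through between cases, so that dst is
                 * word aligned before the word-sized stores start.
                 */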
                switch ((unsigned long)dst & 3) {
                case 3:
                        *--dst = *--src;
                        --c;
                case 2:
                        *--dst = *--src;
                        --c;
                case 1:
                        *--dst = *--src;
                        --c;
                }

                i_dst = (void *)dst;
                /* Choose a copy scheme based on the source alignment
                 * relative to the destination.
                 */
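                /*
                 * Case N means the source pointer is N bytes past a word
                 * boundary while the destination is now word aligned.
                 * Case 0x0 is a straight word-for-word copy; cases 0x1-0x3
                 * carry the bytes that straddle a source word boundary in
                 * buf_hold and merge them into each destination word with
                 * shifts.
                 */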
                switch ((unsigned long)src & 3) {
                case 0x0:       /* Both byte offsets are aligned */
                        i_src = (const void *)src;

                        for (; c >= 4; c -= 4)
                                *--i_dst = *--i_src;

                        src = (const void *)i_src;
                        break;
                case 0x1:       /* Unaligned - Off by 1 */
                        /* Word align the source */
                        i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
                        /* Load the holding buffer */
                        buf_hold = *--i_src >> 24;
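                        /*
                         * Big endian: buf_hold holds the one still-unused
                         * byte of the source word just loaded, in its low
                         * 8 bits. Each iteration shifts the freshly loaded
                         * word up by one byte and ORs the carried byte into
                         * the bottom of the destination word.
                         */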
                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = value << 8 | buf_hold;
                                buf_hold = value >> 24;
                        }
#else
                        /* Load the holding buffer */
                        buf_hold = (*--i_src & 0xFF) << 24;
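                        /*
                         * Little endian: the still-unused byte sits in the
                         * low 8 bits of the loaded word, so it is pre-shifted
                         * into the top byte of buf_hold, its final position
                         * in the destination word.
                         */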
                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = buf_hold |
                                                ((value & 0xFFFFFF00) >> 8);
                                buf_hold = (value & 0xFF) << 24;
                        }
#endif
                        /* Realign the source */
                        src = (const void *)i_src;
                        src += 1;
                        break;
                case 0x2:       /* Unaligned - Off by 2 */
                        /* Word align the source */
                        i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
                        /* Load the holding buffer */
                        buf_hold = *--i_src >> 16;

                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = value << 16 | buf_hold;
                                buf_hold = value >> 16;
                        }
#else
                        /* Load the holding buffer */
                        buf_hold = (*--i_src & 0xFFFF) << 16;

                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = buf_hold |
                                                ((value & 0xFFFF0000) >> 16);
                                buf_hold = (value & 0xFFFF) << 16;
                        }
#endif
                        /* Realign the source */
                        src = (const void *)i_src;
                        src += 2;
                        break;
                case 0x3:       /* Unaligned - Off by 3 */
                        /* Word align the source */
                        i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
                        /* Load the holding buffer */
                        buf_hold = *--i_src >> 8;

                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = value << 24 | buf_hold;
                                buf_hold = value >> 8;
                        }
#else
                        /* Load the holding buffer */
                        buf_hold = (*--i_src & 0xFFFFFF) << 8;

                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
                                *--i_dst = buf_hold |
                                                ((value & 0xFF000000) >> 24);
                                buf_hold = (value & 0xFFFFFF) << 8;
                        }
#endif
                        /* Realign the source */
                        src = (const void *)i_src;
                        src += 3;
                        break;
                }

                dst = (void *)i_dst;
        }
        /* Finish off any remaining bytes */
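        /*
         * The bytes left over after the word loop (or the whole request
         * when c was less than 4) are copied one at a time; the cases
         * deliberately fall through.
         */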
        switch (c) {
        case 4:
                *--dst = *--src;
        case 3:
                *--dst = *--src;
        case 2:
                *--dst = *--src;
        case 1:
                *--dst = *--src;
        }
        return v_dst;
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */