/*#************************************************************************#*/
/*#-------------------------------------------------------------------------*/
/*#                                                                         #*/
/*# FUNCTION NAME: memset()                                                 #*/
/*#                                                                         #*/
/*# PARAMETERS:    void* dst;   Destination address.                        #*/
/*#                int c;       Value of byte to write.                     #*/
/*#                int len;     Number of bytes to write.                   #*/
/*#                                                                         #*/
/*# RETURNS:       dst.                                                     #*/
/*#                                                                         #*/
/*# DESCRIPTION:   Sets the memory dst of length len bytes to c, as standard.*/
/*#                Framework taken from memcpy.  This routine is            #*/
/*#                very sensitive to compiler changes in register allocation.*/
/*#                Should really be rewritten to avoid this problem.        #*/
/*#                                                                         #*/
/*#-------------------------------------------------------------------------*/
/*#                                                                         #*/
/*# HISTORY:                                                                #*/
/*#                                                                         #*/
/*# DATE      NAME            CHANGES                                       #*/
/*# ----      ----            -------                                       #*/
/*# 990713    HP              Tired of watching this function (or           #*/
/*#                           really, the nonoptimized generic              #*/
/*#                           implementation) take up 90% of simulator      #*/
/*#                           output.  Measurements needed.                 #*/
/*#                                                                         #*/
/*#-------------------------------------------------------------------------*/
#include <linux/types.h>
/* No, there's no macro saying 12*4, since it is "hard" to get it into
   the asm in a good way.  Thus better to expose the problem everywhere. */

/* Assuming 1 cycle per dword written or read (ok, not really true), and
   one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
   so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */

#define ZERO_BLOCK_SIZE (1*12*4)

/* Sets the memory at PDST, of length PLEN bytes, to the byte value C,
   as the standard memset does; returns PDST.

   CRIS-specific: the parameters are pinned to fixed registers
   (r10..r12) so the 'movem' block-store sequence below can use them
   directly.  As the file header warns, this is very sensitive to
   compiler register allocation.  */
void *memset(void *pdst,
             int c,
             size_t plen)
{
  /* Ok.  Now we want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this. */

  register char *return_dst __asm__ ("r10") = pdst;
  register int n __asm__ ("r12") = plen;
  register int lc __asm__ ("r11") = c;

  /* Most apps use memset sanely.  Only those memsetting about 3..4
     bytes or less get penalized compared to the generic implementation
     - and that's not really sane use. */

  /* Replicate the low byte of lc into all four bytes of lc.
     Ugh.  This is fragile at best.  Check with newer GCC releases, if
     they compile cascaded "x |= x << 8" sanely! */
  __asm__("movu.b %0,$r13 \n\
	lslq 8,$r13 \n\
	move.b %0,$r13 \n\
	move.d $r13,%0 \n\
	lslq 16,$r13 \n\
	or.d $r13,%0"
	  : "=r" (lc) : "0" (lc) : "r13");

  {
    /* Declared after the asm above, which clobbers r13. */
    register char *dst __asm__ ("r13") = pdst;

    /* This is NONPORTABLE, but since this whole routine is */
    /* grossly nonportable that doesn't matter. */

    /* Align dst to a dword boundary: first a byte, then a 16-bit word. */
    if (((unsigned long) pdst & 3) != 0
	/* Oops! n=0 must be a legal call, regardless of alignment. */
	&& n >= 3)
      {
	if ((unsigned long) dst & 1)
	  {
	    *dst = (char) lc;
	    n--;
	    dst++;
	  }

	if ((unsigned long) dst & 2)
	  {
	    *(short *) dst = (short) lc;
	    n -= 2;
	    dst += 2;
	  }
      }

    /* Now the fun part.  For the threshold value of this, check the
       equation above. */
    /* Decide which copying method to use. */
    if (n >= ZERO_BLOCK_SIZE)
      {
	/* For large copies we use 'movem' */

	/* It is not optimal to tell the compiler about clobbering any
	   registers; that will move the saving/restoring of those registers
	   to the function prologue/epilogue, and make non-movem sizes
	   suboptimal.

	   This method is not foolproof; it assumes that the "asm reg"
	   declarations at the beginning of the function really are used
	   here (beware: they may be moved to temporary registers).
	   This way, we do not have to save/move the registers around into
	   temporaries; we can safely use them straight away.

	   If you want to check that the allocation was right; then
	   check the equalities in the first comment.  It should say
	   "r13=r13, r12=r12, r11=r11" */
	__asm__ volatile (" \n\
	;; Check that the register asm declaration got right. \n\
	;; The GCC manual says it will work, but there *has* been bugs. \n\
	.ifnc %0-%1-%4,$r13-$r12-$r11 \n\
	.err \n\
	.endif \n\
 \n\
	;; Save the registers we'll clobber in the movem process \n\
	;; on the stack.  Don't mention them to gcc, it will only be \n\
	;; upset. \n\
	subq	11*4,$sp \n\
	movem	$r10,[$sp] \n\
 \n\
	;; Fill r0..r10 with the dword value, so each movem below \n\
	;; stores 12 dwords (r0..r11) per iteration. \n\
	move.d	$r11,$r0 \n\
	move.d	$r11,$r1 \n\
	move.d	$r11,$r2 \n\
	move.d	$r11,$r3 \n\
	move.d	$r11,$r4 \n\
	move.d	$r11,$r5 \n\
	move.d	$r11,$r6 \n\
	move.d	$r11,$r7 \n\
	move.d	$r11,$r8 \n\
	move.d	$r11,$r9 \n\
	move.d	$r11,$r10 \n\
 \n\
	;; Now we've got this: \n\
	;; r13 - dst \n\
	;; r12 - n \n\
 \n\
	;; Update n for the first loop \n\
	subq	12*4,$r12 \n\
0: \n\
	subq	12*4,$r12 \n\
	bge	0b \n\
	movem	$r11,[$r13+] \n\
 \n\
	addq	12*4,$r12 ;; compensate for last loop underflowing n \n\
 \n\
	;; Restore registers from stack \n\
	movem	[$sp+],$r10"

	 /* Outputs */ : "=r" (dst), "=r" (n)
	 /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
      }

    /* Either we directly starts copying, using dword copying
       in a loop, or we copy as much as possible with 'movem'
       and then the last block (<44 bytes) is copied here.
       This will work since 'movem' will have updated src,dst,n. */

    /* NOTE(review): rewritten without the old GCC "cast as lvalue"
       extension (*((long*)dst)++ = lc;), removed in GCC 4.  A dword is
       4 bytes on CRIS, hence the explicit dst += 4. */
    while (n >= 16)
      {
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	n -= 16;
      }

    /* A switch() is definitely the fastest although it takes a LOT of code.
     * Particularly if you inline code this. */
    switch (n)
      {
      case 0:
	break;
      case 1:
	*(char *) dst = (char) lc;
	break;
      case 2:
	*(short *) dst = (short) lc;
	break;
      case 3:
	*(short *) dst = (short) lc; dst += 2;
	*(char *) dst = (char) lc;
	break;
      case 4:
	*(long *) dst = lc;
	break;
      case 5:
	*(long *) dst = lc; dst += 4;
	*(char *) dst = (char) lc;
	break;
      case 6:
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc;
	break;
      case 7:
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc; dst += 2;
	*(char *) dst = (char) lc;
	break;
      case 8:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc;
	break;
      case 9:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(char *) dst = (char) lc;
	break;
      case 10:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc;
	break;
      case 11:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc; dst += 2;
	*(char *) dst = (char) lc;
	break;
      case 12:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc;
	break;
      case 13:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(char *) dst = (char) lc;
	break;
      case 14:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc;
	break;
      case 15:
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(long *) dst = lc; dst += 4;
	*(short *) dst = (short) lc; dst += 2;
	*(char *) dst = (char) lc;
	break;
      }
  }

  return return_dst; /* destination pointer. */
} /* memset() */