/*
 *  arch/xtensa/lib/strncpy_user.S
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Returns: -EFAULT if exception before terminator, N if the entire
 *  buffer filled, else strlen.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
/*
 * char *__strncpy_user(char *dst, const char *src, size_t len)
 */
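/*
 * Call-level view of the contract above (an illustrative sketch, not
 * code from this file; the result travels in a2 and callers treat it
 * as a signed count despite the char * prototype):
 *
 *	long n = __strncpy_user(dst, src, len);
 *	if (n < 0)		// -EFAULT: faulted before the terminator
 *		handle_fault();
 *	else if (n == len)	// whole buffer filled, maybe truncated
 *		handle_truncation();
 *	else			// n == strlen(src), dst is NUL-terminated
 *		use_string(dst, n);
 */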
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
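/*
 * MASKn selects byte n of a loaded word, so "bnone a9, MASKn, label"
 * branches when byte n is zero.  Worked little-endian example
 * (illustrative, not from the original file): for the word
 * a9 = 0x00636261 ("abc\0"), a9 & MASK3 (0xff000000) == 0, so the
 * terminator is found in byte 3.
 */

# Register use (as established by the code below):
#   a2/ return value (dst on entry)
#   a3/ src
#   a4/ len
#   a5/ MASK0
#   a6/ MASK1
#   a7/ MASK2
#   a8/ MASK3
#   a9/ temporary
#   a11/ dst cursor
#   a12/ loop count / end address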
.text
ENTRY(__strncpy_user)

	abi_entry_default
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2		# leave dst in return value register
	beqz	a4, .Lret	# if len is zero
	movi	a5, MASK0	# mask for byte 0
	movi	a6, MASK1	# mask for byte 1
	movi	a7, MASK2	# mask for byte 2
	movi	a8, MASK3	# mask for byte 3
	bbsi.l	a3, 0, .Lsrc1mod2	# if only 8-bit aligned
	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
.Lsrcaligned:	# return here when src is word-aligned
	srli	a12, a4, 2	# number of loop iterations with 4B per loop
	movi	a9, 3
	bnone	a11, a9, .Laligned	# if dst is also word-aligned
	j	.Ldstunaligned
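/*
 * From here: the word-at-a-time loop at .Laligned requires both src
 * and dst to be word-aligned; otherwise we fall back to the byte copy
 * loop at .Ldstunaligned.  The two stubs below first copy single
 * bytes until src reaches a word boundary.
 */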
.Lsrc1mod2:	# src address is odd
	EX(11f)	l8ui	a9, a3, 0	# get byte 0
	addi	a3, a3, 1		# advance src pointer
	EX(10f)	s8i	a9, a11, 0	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero

	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned
.Lsrc2mod4:	# src address is 2 mod 4
	EX(11f)	l8ui	a9, a3, 0	# get byte 0
	/* 1-cycle interlock */
	EX(10f)	s8i	a9, a11, 0	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	EX(11f)	l8ui	a9, a3, 1	# get byte 1
	addi	a3, a3, 2		# advance src pointer
	EX(10f)	s8i	a9, a11, 0	# store byte 1
	beqz	a9, .Lret		# if byte 1 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	bnez	a4, .Lsrcaligned	# if len is nonzero
.Lret:
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
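/*
 * Note on the return value: a11 is only advanced past bytes that were
 * copied and non-zero, so a11 minus the original dst is strlen when a
 * terminator was seen, and exactly len when the buffer filled up,
 * matching the contract in the file header.
 */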
/*
 * dst is word-aligned, src is word-aligned
 */
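/*
 * Main loop: load a whole word, test each byte against the masks set
 * up above, and bail out to .Lz0-.Lz3 as soon as a zero byte is seen;
 * only .Lz3 can rely on the full word already having been stored.
 */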
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a12, .Loop1done
#else
	beqz	a12, .Loop1done
	slli	a12, a12, 2
	add	a12, a12, a11	# a12 = end of last 4B chunk
#endif
.Loop1:
	EX(11f)	l32i	a9, a3, 0	# get word from src
	addi	a3, a3, 4		# advance src pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	EX(10f)	s32i	a9, a11, 0	# store word to dst
	bnone	a9, a8, .Lz3		# if byte 3 is zero
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Loop1
#endif

.Loop1done:
	bbci.l	a4, 1, .L100
	# copy 2 bytes
	EX(11f)	l16ui	a9, a3, 0
	addi	a3, a3, 2	# advance src pointer
#ifdef __XTENSA_EB__
	bnone	a9, a7, .Lz0	# if byte 2 is zero
	bnone	a9, a8, .Lz1	# if byte 3 is zero
#else
	bnone	a9, a5, .Lz0	# if byte 0 is zero
	bnone	a9, a6, .Lz1	# if byte 1 is zero
#endif
	EX(10f)	s16i	a9, a11, 0
	addi	a11, a11, 2	# advance dst pointer
.L100:
	bbci.l	a4, 0, .Lret
	EX(11f)	l8ui	a9, a3, 0
	/* slot */
	EX(10f)	s8i	a9, a11, 0
	beqz	a9, .Lret		# if byte is zero
	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
					# the effect of adding 3 in .Lz3 code
	/* fall thru to .Lz3 and "retw" */
.Lz3:	# byte 3 is zero
	addi	a11, a11, 3	# advance dst pointer
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
.Lz0:	# byte 0 is zero
#ifdef __XTENSA_EB__
	movi	a9, 0
#endif /* __XTENSA_EB__ */
	EX(10f)	s8i	a9, a11, 0
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
.Lz1:	# byte 1 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(10f)	s16i	a9, a11, 0
	addi	a11, a11, 1	# advance dst pointer
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
.Lz2:	# byte 2 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(10f)	s16i	a9, a11, 0
	movi	a9, 0
	EX(10f)	s8i	a9, a11, 2	# store the terminator explicitly
	addi	a11, a11, 2	# advance dst pointer
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
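/*
 * The .Lz0-.Lz2 tails store only the bytes that precede the
 * terminator (plus the NUL itself), since the main loop branched out
 * before its s32i; on big-endian cores the surviving bytes sit in the
 * upper half of a9, hence the extui/movi fixups above.
 */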
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Ldstunaligned:
/*
 * for now just use byte copy loop
 */
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lunalignedend
#else
	beqz	a4, .Lunalignedend
	add	a12, a11, a4	# a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(11f)	l8ui	a9, a3, 0
	addi	a3, a3, 1	# advance src pointer
	EX(10f)	s8i	a9, a11, 0
	beqz	a9, .Lunalignedend	# if byte is zero
	addi	a11, a11, 1	# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Lnextbyte
#endif

.Lunalignedend:
	sub	a2, a11, a2	# compute strlen
	abi_ret_default
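/*
 * The unaligned path trades speed for simplicity: one byte per
 * iteration, bounded either by the hardware loop (loopnez) or by the
 * end address kept in a12 on cores without zero-overhead loops.
 */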
ENDPROC(__strncpy_user)
	.section .fixup, "ax"
	.align	4

	/* For now, just return -EFAULT.  Future implementations might
	 * like to clear remaining kernel space, like the fixup
	 * implementation in memset().  Thus, we differentiate between
	 * load/store fixups. */

10:
11:
	movi	a2, -EFAULT
	abi_ret_default
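/*
 * How the fixups are reached (background, not code from this file):
 * each EX(10f)/EX(11f) annotation records the address of the user
 * access and its fixup label in the kernel exception table, so a
 * fault during a store (10:) or a load (11:) resumes here and the
 * routine returns -EFAULT, as promised in the file header.
 */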