arch/csky/abiv2/memcmp.S
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include "sysdep.h"

ENTRY(memcmp)
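        /*
         * Arguments arrive per the C-SKY ABIV2 calling convention:
         * r0 = s1, r1 = s2, r2 = len, with the result returned in r0.
         * s1 is moved to r3 (the a3/a1 aliases used below refer to
         * r3/r1) so that r0 can hold the running result, which starts
         * at 0 so equal buffers return 0.  r4 is clobbered by the byte
         * loops, so it is saved in r12 and restored on return.
         * PRE_BNEZAD/BNEZAD (see sysdep.h) form a
         * decrement-and-branch-if-not-zero loop on the counter register.
         */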
8         /* Test if len less than 4 bytes.  */
9         mov     r3, r0
10         movi    r0, 0
11         mov     r12, r4
12         cmplti  r2, 4
13         bt      .L_compare_by_byte
        andi    r13, r3, 3
        movi    r19, 4

        /* Test if s1 is not 4 bytes aligned.  */
        bnez    r13, .L_s1_not_aligned

        LABLE_ALIGN
.L_s1_aligned:
        /* s1 is now word aligned; r18 = number of 16-byte blocks.  */
        zext    r18, r2, 31, 4
        /* Test if len less than 16 bytes.  */
        bez     r18, .L_compare_by_word

.L_compare_by_4word:
        /* If aligned, load word each time.  */
        ldw     r20, (r3, 0)
        ldw     r21, (r1, 0)
        /* If s1[i] != s2[i], goto .L_byte_check.  */
        cmpne   r20, r21
        bt      .L_byte_check

        ldw     r20, (r3, 4)
        ldw     r21, (r1, 4)
        cmpne   r20, r21
        bt      .L_byte_check

        ldw     r20, (r3, 8)
        ldw     r21, (r1, 8)
        cmpne   r20, r21
        bt      .L_byte_check

        ldw     r20, (r3, 12)
        ldw     r21, (r1, 12)
        cmpne   r20, r21
        bt      .L_byte_check

        PRE_BNEZAD (r18)
        addi    a3, 16
        addi    a1, 16

        BNEZAD (r18, .L_compare_by_4word)

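        /* Compare any remaining full words (bits [3:2] of len).  */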
.L_compare_by_word:
        zext    r18, r2, 3, 2
        bez     r18, .L_compare_by_byte
.L_compare_by_word_loop:
        ldw     r20, (r3, 0)
        ldw     r21, (r1, 0)
        addi    r3, 4
        PRE_BNEZAD (r18)
        cmpne   r20, r21
        addi    r1, 4
        bt      .L_byte_check
        BNEZAD (r18, .L_compare_by_word_loop)

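        /* Compare the remaining bytes (bits [1:0] of len); lengths
           below 4 bytes also land here directly.  */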
.L_compare_by_byte:
        zext    r18, r2, 1, 0
        bez     r18, .L_return
.L_compare_by_byte_loop:
        ldb     r0, (r3, 0)
        ldb     r4, (r1, 0)
        addi    r3, 1
        subu    r0, r4
        PRE_BNEZAD (r18)
        addi    r1, 1
        bnez    r0, .L_return
        BNEZAD (r18, .L_compare_by_byte_loop)

.L_return:
        mov     r4, r12
        rts

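/*
 * A mismatching word was found above.  Locate the first differing byte
 * in memory order; which byte of the register that is depends on
 * endianness, hence the two variants below.
 */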
# ifdef __CSKYBE__
/* s1[i] != s2[i] in word, so we check byte 0.  */
.L_byte_check:
        xtrb0   r0, r20
        xtrb0   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 1 */
        xtrb1   r0, r20
        xtrb1   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 2 */
        xtrb2   r0, r20
        xtrb2   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 3 */
        xtrb3   r0, r20
        xtrb3   r2, r21
        subu    r0, r2
        br      .L_return
# else
/* s1[i] != s2[i] in word, so we check byte 3.  */
.L_byte_check:
        xtrb3   r0, r20
        xtrb3   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 2 */
        xtrb2   r0, r20
        xtrb2   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 1 */
        xtrb1   r0, r20
        xtrb1   r2, r21
        subu    r0, r2
        bnez    r0, .L_return

        /* check byte 0 */
        xtrb0   r0, r20
        xtrb0   r2, r21
        subu    r0, r2
        br      .L_return
# endif /* !__CSKYBE__ */

/* Compare when s1 is not aligned.  */
.L_s1_not_aligned:
        sub     r13, r19, r13
        sub     r2, r13
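        /* r13 now holds the byte count needed to word-align s1; compare
           those bytes one at a time, then rejoin the aligned path.  */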
.L_s1_not_aligned_loop:
        ldb     r0, (r3, 0)
        ldb     r4, (r1, 0)
        addi    r3, 1
        subu    r0, r4
        PRE_BNEZAD (r13)
        addi    r1, 1
        bnez    r0, .L_return
        BNEZAD (r13, .L_s1_not_aligned_loop)
        br      .L_s1_aligned
ENDPROC(memcmp)