/* checksum.S: Sparc V9 optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996, 2000 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Linux/Alpha checksum c-code
 *	Linux/ix86 inline checksum assembly
 *	RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *	David Mosberger-Tang for optimized reference c-code
 *	BSD4.4 portable checksum routine
 */

#include <asm/export.h>

	.text
csum_partial_fix_alignment:
	/* We checked for zero length already, so there must be
	 * at least one byte.
	 */
	be,pt		%icc, 1f
	 nop
	ldub		[%o0 + 0x00], %o4
	add		%o0, 1, %o0
	sub		%o1, 1, %o1
1:	andcc		%o0, 0x2, %g0
	be,pn		%icc, csum_partial_post_align
	 cmp		%o1, 2
	blu,pn		%icc, csum_partial_end_cruft
	 nop
	lduh		[%o0 + 0x00], %o5
	add		%o0, 2, %o0
	sub		%o1, 2, %o1
	ba,pt		%xcc, csum_partial_post_align
	 add		%o5, %o4, %o4
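	/* On the path back to csum_partial_post_align the buffer pointer
	 * is now 4-byte aligned: a leading odd byte and/or a leading
	 * halfword has been consumed and accumulated above.
	 */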
	.align		32
	.globl		csum_partial
	.type		csum_partial,#function
	EXPORT_SYMBOL(csum_partial)
csum_partial:	/* %o0=buff, %o1=len, %o2=sum */
	prefetch	[%o0 + 0x000], #n_reads
	clr		%o4
	prefetch	[%o0 + 0x040], #n_reads
	brz,pn		%o1, csum_partial_finish
	 andcc		%o0, 0x3, %g0
49 /* We "remember" whether the lowest bit in the address
50 * was set in %g7. Because if it is, we have to swap
51 * upper and lower 8 bit fields of the sum we calculate.
53 bne,pn %icc, csum_partial_fix_alignment
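	/* Note: the 16-bit one's complement sum is insensitive to where
	 * halfword boundaries fall, except that starting on an odd byte
	 * moves every byte into the opposite half of its halfword.  E.g.
	 * two bytes "ab cd" contribute 0xabcd when summed aligned, but
	 * 0x00ab + 0xcd00 = 0xcdab when started on the odd byte; the two
	 * results differ only by a byte swap, which is undone at the end
	 * whenever %g7 is set.
	 */
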
csum_partial_post_align:
	prefetch	[%o0 + 0x080], #n_reads
	andn		%o1, 0x3f, %o3

	prefetch	[%o0 + 0x0c0], #n_reads
	and		%o1, 0x3f, %o1
	brz,pn		%o3, 2f
	 prefetch	[%o0 + 0x100], #n_reads
	/* So that we don't need to use the non-pairing
	 * add-with-carry instructions we accumulate 32-bit
	 * values into a 64-bit register.  At the end of the
	 * loop we fold it down to 32-bits and so on.
	 */
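	/* Roughly, the trick above corresponds to the following C sketch
	 * (illustrative only; the names are made up and this is not the
	 * kernel's generic lib/checksum.c code):
	 *
	 *	static u32 csum_add32_fold(const u32 *p, int nwords, u64 acc)
	 *	{
	 *		int i;
	 *
	 *		// 32-bit words cannot overflow a 64-bit accumulator
	 *		// for any realistic length, so no add-with-carry is
	 *		// needed inside the loop.
	 *		for (i = 0; i < nwords; i++)
	 *			acc += p[i];
	 *
	 *		// fold 64-->32: add the two 32-bit halves, twice, so
	 *		// the carry from the first add is absorbed too.
	 *		acc = (acc >> 32) + (acc & 0xffffffffUL);
	 *		acc = (acc >> 32) + (acc & 0xffffffffUL);
	 *		return (u32) acc;
	 *	}
	 */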
	prefetch	[%o0 + 0x140], #n_reads
1:	lduw		[%o0 + 0x00], %o5
	lduw		[%o0 + 0x04], %g1
	lduw		[%o0 + 0x08], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x0c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x10], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x14], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x18], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x1c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x20], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x24], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x28], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x2c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x30], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x34], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x38], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x3c], %g3
	add		%o4, %g1, %o4
	prefetch	[%o0 + 0x180], #n_reads
	add		%o4, %g2, %o4
	subcc		%o3, 0x40, %o3
	add		%o0, 0x40, %o0
	bne,pt		%icc, 1b
	 add		%o4, %g3, %o4

2:	and		%o1, 0x3c, %o3
	brz,pn		%o3, 2f
	 sub		%o1, %o3, %o1
1:	lduw		[%o0 + 0x00], %o5
	subcc		%o3, 0x4, %o3
	add		%o0, 0x4, %o0
	bne,pt		%icc, 1b
	 add		%o4, %o5, %o4

2:	/* fold 64-->32 */
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4

	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4

csum_partial_end_cruft:
	/* %o4 has the 16-bit sum we have calculated so-far.  */
	cmp		%o1, 2
	blu,pt		%icc, 1f
	 nop
	lduh		[%o0 + 0x00], %o5
	sub		%o1, 2, %o1
	add		%o0, 2, %o0
	add		%o4, %o5, %o4
1:	brz,pt		%o1, 1f
	 nop
	ldub		[%o0 + 0x00], %o5
	sub		%o1, 1, %o1
	add		%o0, 1, %o0
	sll		%o5, 8, %o5
	add		%o4, %o5, %o4
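	/* A trailing odd byte is the high-order byte of its (zero padded)
	 * 16-bit word in the big-endian byte stream, hence the shift left
	 * by 8 before it is accumulated.
	 */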
1:
	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4

	brz,pt		%g7, 1f
	 nop

	/* We started with an odd byte, byte-swap the result.  */
	srl		%o4, 8, %o5
	and		%o4, 0xff, %g1
	sll		%g1, 8, %g1
	or		%o5, %g1, %o4

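	/* The swap is the inverse of the byte rotation introduced by the
	 * odd starting address (see the note at csum_partial above).
	 */
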
1:	addcc		%o2, %o4, %o2
	addc		%g0, %o2, %o2

csum_partial_finish:
	retl
	 srl		%o2, 0, %o0
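	/* The caller-supplied sum in %o2 is combined with an end-around
	 * carry (addcc/addc), as one's complement arithmetic requires:
	 * e.g. 0xffff0000 + 0x00012345 wraps to 0x00012346 instead of
	 * dropping the carry bit.  The final srl zero-extends the 32-bit
	 * result for the return value.
	 */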