/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Optimized version of the standard do_csum() function
 *
 * Return: a 64bit quantity containing the 16bit Internet checksum
 *
 * Inputs:
 *	in0: address of buffer to checksum (char *)
 *	in1: length of the buffer (int)
 *
 * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * 02/04/22	Ken Chen <kenneth.w.chen@intel.com>
 *		Data locality study on the checksum buffer.
 *		More optimization cleanup - remove excessive stop bits.
 * 02/04/08	David Mosberger <davidm@hpl.hp.com>
 *		More cleanup and tuning.
 * 01/04/18	Jun Nakajima <jun.nakajima@intel.com>
 *		Clean up and optimize the software pipeline, loading two
 *		back-to-back 8-byte words per loop. Clean up the initialization
 *		for the loop. Support the cases where load latency = 1 or 2.
 *		Set CONFIG_IA64_LOAD_LATENCY to 1 or 2 (default).
 */

#include <asm/asmmacro.h>
// Theory of operations:
//	The goal is to go as quickly as possible to the point where
//	we can checksum 16 bytes/loop. Before reaching that point we must
//	take care of incorrect alignment of the first byte.
//
//	The code hereafter also takes care of the "tail" part of the buffer
//	before entering the core loop, if any. The checksum is a sum, so it
//	allows us to commute operations. So we do the "head" and "tail"
//	first to finish at full speed in the body. Once we get the head and
//	tail values, we feed them into the pipeline, very handy initialization.
//
//	Of course we deal with the special case where the whole buffer fits
//	into one 8 byte word. In this case we have only one entry in the pipeline.
//
//	We use a (LOAD_LATENCY+2)-stage pipeline in the loop to account for
//	possible load latency and also to accommodate the head and tail.
//
//	The end of the function deals with folding the checksum from 64bits
//	down to 16bits taking care of the carry.
//
//	This version avoids synchronization in the core loop by also using a
//	pipeline for the accumulation of the checksum in resultx[] (x=1,2).
//	 wordx[] (x=1,2)
//	|---|
//	|   | 0			: new value loaded in pipeline
//	|---|
//	|   | -			: in transit data
//	|---|
//	|   | LOAD_LATENCY	: current value to add to checksum
//	|---|
//	|   | LOAD_LATENCY+1	: previous value added to checksum
//	|---|			(previous iteration)
//
//	 resultx[] (x=1,2)
//	|---|
//	|   | 0			: initial value
//	|---|
//	|   | LOAD_LATENCY-1	: new checksum
//	|---|
//	|   | LOAD_LATENCY	: previous value of checksum
//	|---|
//	|   | LOAD_LATENCY+1	: final checksum when out of the loop
//	|---|
// See RFC1071 "Computing the Internet Checksum" for various techniques for
// calculating the Internet checksum.
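//
// For reference only: a minimal C sketch of the scheme described above
// (head/tail byte masking, 8-byte accumulation with end-around carry, and
// the final fold down to 16 bits). It is an illustration, not the source
// this assembly was derived from; the name csum_sketch and the scalar,
// non-pipelined loop are ours, a little-endian layout is assumed, and
// len > 0 is assumed (the zero/negative case is handled separately below).
//
//	static unsigned long csum_sketch(const unsigned char *buf, long len)
//	{
//		const unsigned long *p =
//			(const unsigned long *)((unsigned long)buf & ~7UL);
//		unsigned long firstoff = (unsigned long)buf & 7;
//		unsigned long lastoff  = ((unsigned long)buf + len) & 7;
//		unsigned long hmask = ~0UL << (firstoff * 8);
//		unsigned long tmask = lastoff ? ~0UL >> ((8 - lastoff) * 8) : ~0UL;
//		long nwords = ((((unsigned long)buf + len - 1) >> 3) -
//			       ((unsigned long)buf >> 3)) + 1;
//		unsigned long sum = 0, w;
//
//		for (long i = 0; i < nwords; i++) {
//			w = p[i];
//			if (i == 0)
//				w &= hmask;		/* drop bytes before buf */
//			if (i == nwords - 1)
//				w &= tmask;		/* drop bytes past buf+len */
//			sum += w;
//			if (sum < w)			/* end-around carry */
//				sum++;
//		}
//		sum = (sum & 0xffffffff) + (sum >> 32);	/* fold 64 -> 33 bits */
//		sum = (sum & 0xffff) + (sum >> 16);
//		sum = (sum & 0xffff) + (sum >> 16);
//		sum = (sum & 0xffff) + (sum >> 16);	/* now a 16-bit value */
//		if ((unsigned long)buf & 1)		/* odd start: swap bytes */
//			sum = ((sum & 0xff) << 8) | ((sum >> 8) & 0xff);
//		return sum;
//	}
//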
// NOT YET DONE:
//	- Maybe another algorithm which would take care of the folding at the
//	  end in a different manner.
//	- Work with people more knowledgeable than me on the network stack
//	  to figure out if we could not split the function depending on the
//	  type of packet or alignment we get. Like the ip_fast_csum() routine
//	  where we know we have at least 20bytes worth of data to checksum.
//	- Do a better job of handling small packets.
//	- Note on prefetching: it was found that under various loads, i.e. ftp read/write,
//	  nfs read/write, the L1 cache hit rate is at 60% and the L2 cache hit rate is at 99.8%
//	  on the data that the buffer points to (partly because the checksum is often preceded by
//	  a copy_from_user()). This finding indicates that lfetch will not be beneficial since
//	  the data is already in the cache.
#define LOAD_LATENCY	2	// XXX fix me

#if (LOAD_LATENCY != 1) && (LOAD_LATENCY != 2)
# error "Only 1 or 2 is supported/tested for LOAD_LATENCY."
#endif

#define PIPE_DEPTH	(LOAD_LATENCY+2)
#define ELD		p[LOAD_LATENCY]		// end of load
#define ELD_1		p[LOAD_LATENCY+1]	// and next stage
// unsigned long do_csum(unsigned char *buf, long len)
GLOBAL_ENTRY(do_csum)
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,2,16,0,16
	.rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2]
	.rotp p[PIPE_DEPTH], pC1[2], pC2[2]
	mov ret0=r0			// in case we have zero length
	cmp.lt p0,p6=r0,len		// check for zero length or negative (32bit len)
	add tmp1=buf,len		// last byte's address
	mov saved_pr=pr			// preserve predicates (rotation)
(p6)	br.ret.spnt.many rp		// return if zero or negative length
	mov hmask=-1			// initialize head mask
	tbit.nz p15,p0=buf,0		// is buf an odd address?
	and first1=-8,buf		// 8-byte align down address of first1 element
	and firstoff=7,buf		// how many bytes off for first1 element
	mov tmask=-1			// initialize tail mask
	adds tmp2=-1,tmp1		// last-1
	and lastoff=7,tmp1		// how many bytes off for last element
	sub tmp1=8,lastoff		// complement to lastoff
	and last=-8,tmp2		// address of word containing last byte
	sub tmp3=last,first1		// tmp3=distance from first1 to last
	.save ar.lc, saved_lc
	mov saved_lc=ar.lc		// save lc
	cmp.eq p8,p9=last,first1	// everything fits in one word ?
	ld8 firstval=[first1],8		// load, ahead of time, "first1" word
	and tmp1=7, tmp1		// make sure that if tmp1==8 -> tmp1=0
	shl tmp2=firstoff,3		// number of bits
(p9)	ld8 lastval=[last]		// load, ahead of time, "last" word, if needed
	shl tmp1=tmp1,3			// number of bits
(p9)	adds tmp3=-8,tmp3		// effectively loaded
(p8)	mov lastval=r0			// we don't need lastval if first1==last
	shl hmask=hmask,tmp2		// build head mask, mask off [0,firstoff[
	shr.u tmask=tmask,tmp1		// build tail mask, mask off [lastoff,8[
(p8)	and hmask=hmask,tmask		// apply tail mask to head mask if 1 word only
(p9)	and word2[0]=lastval,tmask	// mask lastval as appropriate
	shr.u count=count,3		// how many 8-byte words?
	// If count is odd, finish this 8-byte word so that we can
	// load two back-to-back 8-byte words per loop thereafter.
	and word1[0]=firstval,hmask	// and mask it as appropriate
	tbit.nz p10,p11=count,0		// if (count is odd)
(p8)	mov result1[0]=word1[0]
(p9)	add result1[0]=word1[0],word2[0]
	cmp.ltu p6,p0=result1[0],word1[0]	// check the carry
	cmp.eq.or.andcm p8,p0=0,count		// exit if zero 8-byte
(p6)	adds result1[0]=1,result1[0]
(p8)	br.cond.dptk .do_csum_exit	// if (within an 8-byte word)
(p11)	br.cond.dptk .do_csum16		// if (count is even)
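//
// Worked example of the hmask/tmask construction above (illustrative
// addresses, not taken from a real trace; little-endian byte order):
//
//	/* buf = ...1003 (firstoff = 3), buf+len = ...1015 (lastoff = 5) */
//	hmask = ~0UL << (3 * 8);	/* 0xffffffffff000000: drop head bytes 0..2  */
//	tmask = ~0UL >> ((8 - 5) * 8);	/* 0x000000ffffffffff: keep tail bytes 0..4  */
//	word1[0] = firstval & hmask;	/* head word, bytes before buf cleared       */
//	word2[0] = lastval  & tmask;	/* tail word, bytes at/after buf+len cleared */
//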
	// Here count is odd.
	ld8 word1[1]=[first1],8		// load an 8-byte word
	cmp.eq p9,p10=1,count		// if (count == 1)
	adds count=-1,count		// loaded an 8-byte word
	add result1[0]=result1[0],word1[1]
	cmp.ltu p6,p0=result1[0],word1[1]
(p6)	adds result1[0]=1,result1[0]
(p9)	br.cond.sptk .do_csum_exit	// if (count == 1) exit
	// Fall through to calculate the checksum, feeding result1[0] in as
	// the initial value of the accumulation.
	//
	// Calculate the checksum loading two 8-byte words per loop.
	//
.do_csum16:
	shr.u count=count,1		// we do 16 bytes per loop
	mov ar.lc=count			// set lc
	// result1[0] must be initialized in advance.
(ELD_1)	cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1]
(pC1[1])adds carry1=1,carry1
(ELD_1)	cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1]
(pC2[1])adds carry2=1,carry2
(ELD)	add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY]
(ELD)	add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY]
(p[0])	ld8 word1[0]=[first1],16
(p[0])	ld8 word2[0]=[first2],16
	// Since len is a 32-bit value, carry cannot be larger than a 64-bit value.
(pC1[1])adds carry1=1,carry1	// since we miss the last one
(pC2[1])adds carry2=1,carry2
	add result1[LOAD_LATENCY+1]=result1[LOAD_LATENCY+1],carry1
	add result2[LOAD_LATENCY+1]=result2[LOAD_LATENCY+1],carry2
	cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1
	cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2
(p6)	adds result1[LOAD_LATENCY+1]=1,result1[LOAD_LATENCY+1]
(p7)	adds result2[LOAD_LATENCY+1]=1,result2[LOAD_LATENCY+1]
	add result1[0]=result1[LOAD_LATENCY+1],result2[LOAD_LATENCY+1]
	cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1]
(p6)	adds result1[0]=1,result1[0]
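//
// In C terms, each pipelined iteration and the merge above amount to the
// following (illustrative sketch only; r1/r2 stand for result1/result2,
// c1/c2 for the carry counters, w1/w2 for the two words loaded per iteration):
//
//	r1 += w1; c1 += (r1 < w1);	/* add + cmp.ltu, lane 1 */
//	r2 += w2; c2 += (r2 < w2);	/* add + cmp.ltu, lane 2 */
//	...
//	/* after the loop: fold the carry counts, then merge the two lanes */
//	r1 += c1; if (r1 < c1) r1++;
//	r2 += c2; if (r2 < c2) r2++;
//	sum = r1 + r2; if (sum < r2) sum++;
//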
.do_csum_exit:
	//
	// now fold 64 into 16 bits taking care of carry
	// that's not very good because it has lots of sequentiality
	//
	shr.u tmp2=result1[0],32
	add result1[0]=tmp1,tmp2
	and tmp1=result1[0],tmp3
	shr.u tmp2=result1[0],16
	add result1[0]=tmp1,tmp2
	and tmp1=result1[0],tmp3
	shr.u tmp2=result1[0],16
	add result1[0]=tmp1,tmp2
	and tmp1=result1[0],tmp3
	shr.u tmp2=result1[0],16
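//
// In C terms, the fold above is (illustrative sketch; tmp3 holds the 0xffff
// mask and tmp1 the masked low part at each step):
//
//	sum = (sum & 0xffffffff) + (sum >> 32);	/* 64 bits -> at most 33 bits */
//	sum = (sum & 0xffff) + (sum >> 16);
//	sum = (sum & 0xffff) + (sum >> 16);
//	sum = (sum & 0xffff) + (sum >> 16);	/* 16-bit result, goes to ret0 */
//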
	mov pr=saved_pr,0xffffffffffff0000
	// if buf was odd then swap bytes
	mov ar.pfs=saved_pfs		// restore ar.ec
(p15)	mux1 ret0=ret0,@rev		// reverse word
(p15)	shr.u ret0=ret0,64-16		// + shift back to position = swap bytes
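//
// Why the swap: the sum is accumulated from 8-byte aligned words, so when
// buf starts at an odd address every byte lands in the opposite lane of its
// 16-bit pair; swapping the two bytes of the final sum corrects for this.
// Equivalent C (illustrative):
//
//	if ((unsigned long)buf & 1)
//		ret0 = ((ret0 & 0xff) << 8) | ((ret0 >> 8) & 0xff);
//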
// I (Jun Nakajima) wrote equivalent code (see below), but it was not much
// better than the original. So keep the original there so that someone
// else can challenge it.
//	shr.u word1[0]=result1[0],32
//	zxt4 result1[0]=result1[0]
//	add result1[0]=result1[0],word1[0]
//	zxt2 result2[0]=result1[0]
//	extr.u word1[0]=result1[0],16,16
//	shr.u carry1=result1[0],32
//	add result2[0]=result2[0],word1[0]
//	add result2[0]=result2[0],carry1
//	extr.u ret0=result2[0],16,16
//	add ret0=ret0,result2[0]
//	mov ar.pfs=saved_pfs		// restore ar.ec
//	mov pr=saved_pr,0xffffffffffff0000
//	// if buf was odd then swap bytes
//	mov ar.lc=saved_lc
//(p15)	mux1 ret0=ret0,@rev		// reverse word
//(p15)	shr.u ret0=ret0,64-16		// + shift back to position = swap bytes
//	br.ret.sptk.many rp