/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
        if (len > desc->count)
                len = desc->count;
        if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
                return 0;
        desc->count -= len;
        desc->offset += len;
        return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);
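/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * could pull a fixed-size header out of an sk_buff through
 * xdr_skb_read_bits. The function name and parameters below are made up
 * for illustration; only xdr_skb_read_bits and struct xdr_skb_reader
 * come from this file and <linux/sunrpc/xdr.h>.
 */
static size_t example_read_rpc_header(struct sk_buff *skb, void *hdr, size_t hdrlen)
{
        struct xdr_skb_reader desc = {
                .skb    = skb,
                .offset = 0,
                .count  = skb->len,
        };

        /* copies at most desc.count bytes, advancing offset and count */
        return xdr_skb_read_bits(&desc, hdr, hdrlen);
}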
/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as skb_read_bits, but calculate a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
        unsigned int pos;
        __wsum csum2;

        if (len > desc->count)
                len = desc->count;
        pos = desc->offset;
        csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
        desc->csum = csum_block_add(desc->csum, csum2, pos);
        desc->count -= len;
        desc->offset += len;
        return len;
}
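/*
 * Illustrative sketch, not part of the upstream file: the checksumming
 * actor accumulates into desc->csum, so a caller that seeds csum with 0
 * gets back the checksum of exactly the bytes it copied. The function
 * and parameter names here are invented for the example.
 */
static __wsum example_copy_with_csum(struct xdr_buf *rcvbuf, struct sk_buff *skb,
                                     unsigned int hdrlen)
{
        struct xdr_skb_reader desc = {
                .skb    = skb,
                .offset = hdrlen,
                .count  = skb->len - hdrlen,
                .csum   = 0,
        };

        /* each actor call folds its chunk into desc.csum via csum_block_add() */
        xdr_partial_copy_from_skb(rcvbuf, 0, &desc, xdr_skb_read_and_csum_bits);
        return desc.csum;
}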
/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
        struct page     **ppage = xdr->pages;
        unsigned int    len, pglen = xdr->page_len;
        ssize_t         copied = 0;
        size_t          ret;

        len = xdr->head[0].iov_len;
        if (base < len) {
                len -= base;
                ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
                copied += ret;
                if (ret != len || !desc->count)
                        goto out;
                base = 0;
        } else
                base -= len;

        if (unlikely(pglen == 0))
                goto copy_tail;
        if (unlikely(base >= pglen)) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }
        do {
                char *kaddr;

                /* ACL likes to be lazy in allocating pages - ACLs
                 * are small by default but can get huge. */
                if (unlikely(*ppage == NULL)) {
                        *ppage = alloc_page(GFP_ATOMIC);
                        if (unlikely(*ppage == NULL)) {
                                if (copied == 0)
                                        copied = -ENOMEM;
                                goto out;
                        }
                }

                len = PAGE_CACHE_SIZE;
                kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
                if (base) {
                        len -= base;
                        if (pglen < len)
                                len = pglen;
                        ret = copy_actor(desc, kaddr + base, len);
                        base = 0;
                } else {
                        if (pglen < len)
                                len = pglen;
                        ret = copy_actor(desc, kaddr, len);
                }
                flush_dcache_page(*ppage);
                kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
                copied += ret;
                if (ret != len || !desc->count)
                        goto out;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len)
                copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
        return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
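/*
 * Illustrative sketch, not part of the upstream file: a transport-side
 * caller copying an sk_buff payload into an RPC receive buffer without
 * checksumming, using xdr_skb_read_bits as the copy_actor "virtual
 * method". The function name, the hdrlen parameter and the error
 * convention are assumptions made for the example.
 */
static ssize_t example_copy_reply(struct xdr_buf *rcvbuf, struct sk_buff *skb,
                                  unsigned int hdrlen)
{
        struct xdr_skb_reader desc = {
                .skb    = skb,
                .offset = hdrlen,               /* skip the transport header */
                .count  = skb->len - hdrlen,
        };

        /* returns the number of bytes copied, or -ENOMEM on page allocation failure */
        return xdr_partial_copy_from_skb(rcvbuf, 0, &desc, xdr_skb_read_bits);
}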
/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
        struct xdr_skb_reader   desc;

        desc.skb = skb;
        desc.offset = sizeof(struct udphdr);
        desc.count = skb->len - desc.offset;

        if (skb_csum_unnecessary(skb))
                goto no_checksum;

        desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
        if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
                return -1;
        if (desc.offset != skb->len) {
                __wsum csum2;
                csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
                desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
        }
        if (desc.count)
                return -1;
        if (csum_fold(desc.csum))
                return -1;
        if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                netdev_rx_csum_fault(skb->dev);
        return 0;
no_checksum:
        if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
                return -1;
        if (desc.count)
                return -1;
        return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
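/*
 * Illustrative sketch, not part of the upstream file: how a UDP receive
 * path might consume csum_partial_copy_to_xdr. The real callers live in
 * the transport code; the function name and the -EBADMSG return below
 * are assumptions for the example.
 */
static int example_udp_receive(struct xdr_buf *rcvbuf, struct sk_buff *skb)
{
        /*
         * Copies the UDP payload into rcvbuf; the checksum is verified
         * while copying unless the device already checked it
         * (skb_csum_unnecessary()).
         */
        if (csum_partial_copy_to_xdr(rcvbuf, skb))
                return -EBADMSG;        /* checksum failure or truncated copy */
        return 0;
}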