/*
 * linux/net/sunrpc/xdr.c
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

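/*
 * Illustrative sketch (not part of the original file): the padding rule
 * used above.  For nbytes = 5, XDR_QUADLEN(5) is 2 quads (8 bytes), so
 * three zero bytes follow the data and the position advances by 2 words.
 */
#if 0
static void opaque_fixed_example(u32 *p)
{
	static const char id[5] = "abcde";

	/* emits 'a' 'b' 'c' 'd' 'e' 0 0 0 and returns p + 2 */
	p = xdr_encode_opaque_fixed(p, id, 5);
	(void)p;
}
#endif
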
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

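/*
 * Illustrative sketch (not part of the original file): attaching page data
 * to a send buffer.  "count" bytes of page data start at offset 0; when
 * count is not a multiple of 4, xdr_encode_pages() points the tail at pad
 * bytes so the stream stays 32-bit aligned.
 */
#if 0
static void encode_pages_example(struct xdr_buf *sndbuf,
				 struct page **pages, unsigned int count)
{
	xdr_encode_pages(sndbuf, pages, 0, count);
}
#endif
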
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

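/*
 * Illustrative sketch (not part of the original file): the "page vector
 * address" convention described above.  A byte at offset 'base' within
 * pages[i] has address (i << PAGE_CACHE_SHIFT) + base, so splitting an
 * address back into page index and in-page offset is a shift and a mask.
 */
#if 0
static void page_vector_address_example(struct page **pages, size_t addr)
{
	struct page *pg = pages[addr >> PAGE_CACHE_SHIFT];
	size_t offset   = addr & ~PAGE_CACHE_MASK;

	/* pg/offset now name the same byte that 'addr' does */
	(void)pg; (void)offset;
}
#endif
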
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->len -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);

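/*
 * Illustrative sketch (not part of the original file): typical encode-side
 * use of the stream API.  The xdr_buf is assumed to have been set up by the
 * RPC layer, and "fhandle" is a hypothetical netobj to marshal.
 */
#if 0
static int encode_example(struct xdr_buf *buf, const struct xdr_netobj *fhandle)
{
	struct xdr_stream xdr;
	uint32_t *p;

	xdr_init_encode(&xdr, buf, NULL);
	/* one flag word, plus length word + padded data for the netobj */
	p = xdr_reserve_space(&xdr, 4 + 4 + XDR_QUADLEN(fhandle->len) * 4);
	if (p == NULL)
		return -EMSGSIZE;
	*p++ = htonl(1);
	p = xdr_encode_opaque(p, fhandle->data, fhandle->len);
	return 0;
}
#endif
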
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

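/*
 * Illustrative sketch (not part of the original file): decode-side use of
 * the stream API to pull two 32-bit words out of the head kvec.  The
 * "rcvbuf_start" pointer is a hypothetical starting position supplied by
 * the caller.
 */
#if 0
static int decode_example(struct xdr_buf *buf, uint32_t *rcvbuf_start,
			  u32 *status, u32 *count)
{
	struct xdr_stream xdr;
	uint32_t *p;

	xdr_init_decode(&xdr, buf, rcvbuf_start);
	p = xdr_inline_decode(&xdr, 8);
	if (p == NULL)
		return -EIO;
	*status = ntohl(*p++);
	*count = ntohl(*p);
	return 0;
}
#endif
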
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

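/*
 * Illustrative sketch (not part of the original file): a READ-style reply
 * decoder that pulls the byte count from the head, then realigns the page
 * data with xdr_read_pages() so the pages hold exactly "count" bytes.
 */
#if 0
static int decode_read_reply_example(struct xdr_stream *xdr, u32 *count)
{
	uint32_t *p;

	p = xdr_inline_decode(xdr, 4);
	if (p == NULL)
		return -EIO;
	*count = ntohl(*p);
	/* excess page data is moved into the tail */
	xdr_read_pages(xdr, *count);
	return 0;
}
#endif
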
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov. Indicates empty intersection by setting
 * length of subiov to zero. Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

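/*
 * Illustrative sketch (not part of the original file): carving a 512-byte
 * window out of an existing xdr_buf, starting 128 bytes in.  On success the
 * sub-buffer's head/pages/tail point into the parent's storage; no data is
 * copied.  The offsets are hypothetical.
 */
#if 0
static int subsegment_example(struct xdr_buf *buf)
{
	struct xdr_buf window;

	if (xdr_buf_subsegment(buf, &window, 128, 512))
		return -EINVAL;	/* range falls outside the parent buffer */
	/* window.len == 512 */
	return 0;
}
#endif
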
/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}

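/*
 * Illustrative sketch (not part of the original file): patching and reading
 * back a single 32-bit word at a known offset of an xdr_buf built from a
 * plain kvec.  The kvec is assumed to hold at least 12 bytes; the offset
 * and value are hypothetical.
 */
#if 0
static int word_roundtrip_example(struct kvec *iov)
{
	struct xdr_buf buf;
	u32 value;

	xdr_buf_from_iov(iov, &buf);
	if (xdr_encode_word(&buf, 8, 42))	/* writes htonl(42) at byte 8 */
		return -EINVAL;
	if (xdr_decode_word(&buf, 8, &value))	/* value == 42 */
		return -EINVAL;
	return 0;
}
#endif
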
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			} else {
				while (avail_page >= desc->elem_size) {
					err = desc->xcode(desc, c);
					if (err)
						goto out;
					c += desc->elem_size;
					avail_page -= desc->elem_size;
				}
				if (avail_page) {
					unsigned int l = min(avail_page,
						desc->elem_size - copied);
					if (!elem) {
						elem = kmalloc(desc->elem_size,
							       GFP_KERNEL);
						err = -ENOMEM;
						if (!elem)
							goto out;
					}
					if (encode) {
						if (!copied) {
							err = desc->xcode(desc,
									  elem);
							if (err)
								goto out;
						}
						memcpy(c, elem + copied, l);
						copied += l;
						if (copied == desc->elem_size)
							copied = 0;
					} else {
						memcpy(elem + copied, c, l);
						copied += l;
						if (copied == desc->elem_size) {
							err = desc->xcode(desc,
									  elem);
							if (err)
								goto out;
							copied = 0;
						}
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	if (elem)
		kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
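
/*
 * Illustrative sketch (not part of the original file): decoding a counted
 * array of fixed-size (8-byte) elements with xdr_decode_array2().  The
 * callback body and the element layout are hypothetical; the callback
 * signature simply mirrors how desc->xcode() is invoked above, and only
 * the desc fields actually referenced by xdr_xcode_array2() are filled in.
 */
#if 0
static int example_elem_xcode(struct xdr_array2_desc *desc, void *elem)
{
	/* called once per 8-byte element, in stream order */
	return 0;
}

static int decode_array_example(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size    = 8,
		.array_maxlen = 256,
		.xcode        = example_elem_xcode,
	};

	/* reads the length word at 'base', then walks the elements */
	return xdr_decode_array2(buf, base, &desc);
}
#endif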