0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * linux/net/sunrpc/xdr.c
0004  *
0005  * Generic XDR support.
0006  *
0007  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
0008  */
0009 
0010 #include <linux/module.h>
0011 #include <linux/slab.h>
0012 #include <linux/types.h>
0013 #include <linux/string.h>
0014 #include <linux/kernel.h>
0015 #include <linux/pagemap.h>
0016 #include <linux/errno.h>
0017 #include <linux/sunrpc/xdr.h>
0018 #include <linux/sunrpc/msg_prot.h>
0019 #include <linux/bvec.h>
0020 #include <trace/events/sunrpc.h>
0021 
0022 static void _copy_to_pages(struct page **, size_t, const char *, size_t);
0023 
0024 
0025 /*
0026  * XDR functions for basic NFS types
0027  */
0028 __be32 *
0029 xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
0030 {
0031     unsigned int    quadlen = XDR_QUADLEN(obj->len);
0032 
0033     p[quadlen] = 0;     /* zero trailing bytes */
0034     *p++ = cpu_to_be32(obj->len);
0035     memcpy(p, obj->data, obj->len);
0036     return p + XDR_QUADLEN(obj->len);
0037 }
0038 EXPORT_SYMBOL_GPL(xdr_encode_netobj);
0039 
0040 __be32 *
0041 xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
0042 {
0043     unsigned int    len;
0044 
0045     if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
0046         return NULL;
0047     obj->len  = len;
0048     obj->data = (u8 *) p;
0049     return p + XDR_QUADLEN(len);
0050 }
0051 EXPORT_SYMBOL_GPL(xdr_decode_netobj);
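
/*
 * Editorial sketch (not part of the original file): how the two netobj
 * helpers pair up. Assumes 'buf' is a quad-aligned scratch area large
 * enough for the 4-byte length word plus the rounded-up payload.
 */
static bool example_netobj_roundtrip(__be32 *buf, const struct xdr_netobj *in,
                                     struct xdr_netobj *out)
{
    __be32 *end;

    end = xdr_encode_netobj(buf, in);   /* length word + padded data */
    /* decoding the same words must land on the same end pointer */
    return xdr_decode_netobj(buf, out) == end && out->len == in->len;
}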
0052 
0053 /**
0054  * xdr_encode_opaque_fixed - Encode fixed length opaque data
0055  * @p: pointer to current position in XDR buffer.
0056  * @ptr: pointer to data to encode (or NULL)
0057  * @nbytes: size of data.
0058  *
0059  * Copy the array of data of length nbytes at ptr to the XDR buffer
0060  * at position p, then align to the next 32-bit boundary by padding
0061  * with zero bytes (see RFC1832).
0062  * Note: if ptr is NULL, only the padding is performed.
0063  *
0064  * Returns the updated current XDR buffer position
0065  *
0066  */
0067 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
0068 {
0069     if (likely(nbytes != 0)) {
0070         unsigned int quadlen = XDR_QUADLEN(nbytes);
0071         unsigned int padding = (quadlen << 2) - nbytes;
0072 
0073         if (ptr != NULL)
0074             memcpy(p, ptr, nbytes);
0075         if (padding != 0)
0076             memset((char *)p + nbytes, 0, padding);
0077         p += quadlen;
0078     }
0079     return p;
0080 }
0081 EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
0082 
0083 /**
0084  * xdr_encode_opaque - Encode variable length opaque data
0085  * @p: pointer to current position in XDR buffer.
0086  * @ptr: pointer to data to encode (or NULL)
0087  * @nbytes: size of data.
0088  *
0089  * Returns the updated current XDR buffer position
0090  */
0091 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
0092 {
0093     *p++ = cpu_to_be32(nbytes);
0094     return xdr_encode_opaque_fixed(p, ptr, nbytes);
0095 }
0096 EXPORT_SYMBOL_GPL(xdr_encode_opaque);
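
/*
 * Editorial note: on the wire, a variable-length opaque costs one length
 * word plus the payload rounded up to a quad boundary; e.g. 5 bytes of
 * data consume 4 + 8 = 12 bytes. A minimal helper expressing that:
 */
static size_t example_opaque_wire_size(unsigned int nbytes)
{
    return sizeof(__be32) + (XDR_QUADLEN(nbytes) << 2);
}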
0097 
0098 __be32 *
0099 xdr_encode_string(__be32 *p, const char *string)
0100 {
0101     return xdr_encode_array(p, string, strlen(string));
0102 }
0103 EXPORT_SYMBOL_GPL(xdr_encode_string);
0104 
0105 __be32 *
0106 xdr_decode_string_inplace(__be32 *p, char **sp,
0107               unsigned int *lenp, unsigned int maxlen)
0108 {
0109     u32 len;
0110 
0111     len = be32_to_cpu(*p++);
0112     if (len > maxlen)
0113         return NULL;
0114     *lenp = len;
0115     *sp = (char *) p;
0116     return p + XDR_QUADLEN(len);
0117 }
0118 EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
0119 
0120 /**
0121  * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
0122  * @buf: XDR buffer where string resides
0123  * @len: length of string, in bytes
0124  *
0125  */
0126 void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
0127 {
0128     char *kaddr;
0129 
0130     kaddr = kmap_atomic(buf->pages[0]);
0131     kaddr[buf->page_base + len] = '\0';
0132     kunmap_atomic(kaddr);
0133 }
0134 EXPORT_SYMBOL_GPL(xdr_terminate_string);
0135 
0136 size_t xdr_buf_pagecount(const struct xdr_buf *buf)
0137 {
0138     if (!buf->page_len)
0139         return 0;
0140     return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
0141 }
0142 
0143 int
0144 xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
0145 {
0146     size_t i, n = xdr_buf_pagecount(buf);
0147 
0148     if (n != 0 && buf->bvec == NULL) {
0149         buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
0150         if (!buf->bvec)
0151             return -ENOMEM;
0152         for (i = 0; i < n; i++) {
0153             buf->bvec[i].bv_page = buf->pages[i];
0154             buf->bvec[i].bv_len = PAGE_SIZE;
0155             buf->bvec[i].bv_offset = 0;
0156         }
0157     }
0158     return 0;
0159 }
0160 
0161 void
0162 xdr_free_bvec(struct xdr_buf *buf)
0163 {
0164     kfree(buf->bvec);
0165     buf->bvec = NULL;
0166 }
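
/*
 * Editorial sketch: buf->bvec mirrors buf->pages one entry per page, so a
 * typical (hypothetical) caller allocates it just before handing the
 * buffer to the transport and releases it afterwards.
 */
static int example_send_with_bvec(struct xdr_buf *buf)
{
    int err = xdr_alloc_bvec(buf, GFP_KERNEL);

    if (err)
        return err;
    /* ... hand buf->bvec to an iov_iter / sendmsg-style transport ... */
    xdr_free_bvec(buf);
    return 0;
}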
0167 
0168 /**
0169  * xdr_inline_pages - Prepare receive buffer for a large reply
0170  * @xdr: xdr_buf into which reply will be placed
0171  * @offset: expected offset where data payload will start, in bytes
0172  * @pages: vector of struct page pointers
0173  * @base: offset in first page where receive should start, in bytes
0174  * @len: expected size of the upper layer data payload, in bytes
0175  *
0176  */
0177 void
0178 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
0179          struct page **pages, unsigned int base, unsigned int len)
0180 {
0181     struct kvec *head = xdr->head;
0182     struct kvec *tail = xdr->tail;
0183     char *buf = (char *)head->iov_base;
0184     unsigned int buflen = head->iov_len;
0185 
0186     head->iov_len  = offset;
0187 
0188     xdr->pages = pages;
0189     xdr->page_base = base;
0190     xdr->page_len = len;
0191 
0192     tail->iov_base = buf + offset;
0193     tail->iov_len = buflen - offset;
0194     xdr->buflen += len;
0195 }
0196 EXPORT_SYMBOL_GPL(xdr_inline_pages);
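
/*
 * Editorial sketch: a typical receive setup. An NFS READ-style reply
 * decodes 'hdrlen' bytes of protocol header from head[0], lands the
 * 'count' payload bytes in @pages, and the remainder of the original
 * head space becomes the tail. All names here are illustrative.
 */
static void example_prepare_read_reply(struct xdr_buf *rcvbuf,
                                       unsigned int hdrlen,
                                       struct page **pages,
                                       unsigned int count)
{
    xdr_inline_pages(rcvbuf, hdrlen, pages, 0, count);
}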
0197 
0198 /*
0199  * Helper routines for doing 'memmove' like operations on a struct xdr_buf
0200  */
0201 
0202 /**
0203  * _shift_data_left_pages
0204  * @pages: vector of pages containing both the source and dest memory area.
0205  * @pgto_base: page vector address of destination
0206  * @pgfrom_base: page vector address of source
0207  * @len: number of bytes to copy
0208  *
0209  * Note: the addresses pgto_base and pgfrom_base are both calculated in
0210  *       the same way:
0211  *            if a memory area starts at byte 'base' in page 'pages[i]',
0212  *            then its address is given as (i << PAGE_SHIFT) + base
0213  * Also note: pgto_base must be < pgfrom_base, but the memory areas
0214  *  they point to may overlap.
0215  */
0216 static void
0217 _shift_data_left_pages(struct page **pages, size_t pgto_base,
0218             size_t pgfrom_base, size_t len)
0219 {
0220     struct page **pgfrom, **pgto;
0221     char *vfrom, *vto;
0222     size_t copy;
0223 
0224     BUG_ON(pgfrom_base <= pgto_base);
0225 
0226     if (!len)
0227         return;
0228 
0229     pgto = pages + (pgto_base >> PAGE_SHIFT);
0230     pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
0231 
0232     pgto_base &= ~PAGE_MASK;
0233     pgfrom_base &= ~PAGE_MASK;
0234 
0235     do {
0236         if (pgto_base >= PAGE_SIZE) {
0237             pgto_base = 0;
0238             pgto++;
0239         }
0240         if (pgfrom_base >= PAGE_SIZE) {
0241             pgfrom_base = 0;
0242             pgfrom++;
0243         }
0244 
0245         copy = len;
0246         if (copy > (PAGE_SIZE - pgto_base))
0247             copy = PAGE_SIZE - pgto_base;
0248         if (copy > (PAGE_SIZE - pgfrom_base))
0249             copy = PAGE_SIZE - pgfrom_base;
0250 
0251         vto = kmap_atomic(*pgto);
0252         if (*pgto != *pgfrom) {
0253             vfrom = kmap_atomic(*pgfrom);
0254             memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
0255             kunmap_atomic(vfrom);
0256         } else
0257             memmove(vto + pgto_base, vto + pgfrom_base, copy);
0258         flush_dcache_page(*pgto);
0259         kunmap_atomic(vto);
0260 
0261         pgto_base += copy;
0262         pgfrom_base += copy;
0263 
0264     } while ((len -= copy) != 0);
0265 }
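
/*
 * Editorial note on the addressing convention used by the shift helpers:
 * a byte at offset 'base' in pages[i] has the linear page-vector address
 * (i << PAGE_SHIFT) + base, and the split is recovered with a shift and
 * a mask, as in this sketch:
 */
static void example_split_pgaddr(size_t pgaddr, size_t *index, size_t *offset)
{
    *index = pgaddr >> PAGE_SHIFT;  /* which page */
    *offset = pgaddr & ~PAGE_MASK;  /* byte offset within that page */
}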
0266 
0267 /**
0268  * _shift_data_right_pages
0269  * @pages: vector of pages containing both the source and dest memory area.
0270  * @pgto_base: page vector address of destination
0271  * @pgfrom_base: page vector address of source
0272  * @len: number of bytes to copy
0273  *
0274  * Note: the addresses pgto_base and pgfrom_base are both calculated in
0275  *       the same way:
0276  *            if a memory area starts at byte 'base' in page 'pages[i]',
0277  *            then its address is given as (i << PAGE_SHIFT) + base
0278  * Also note: pgfrom_base must be < pgto_base, but the memory areas
0279  *  they point to may overlap.
0280  */
0281 static void
0282 _shift_data_right_pages(struct page **pages, size_t pgto_base,
0283         size_t pgfrom_base, size_t len)
0284 {
0285     struct page **pgfrom, **pgto;
0286     char *vfrom, *vto;
0287     size_t copy;
0288 
0289     BUG_ON(pgto_base <= pgfrom_base);
0290 
0291     if (!len)
0292         return;
0293 
0294     pgto_base += len;
0295     pgfrom_base += len;
0296 
0297     pgto = pages + (pgto_base >> PAGE_SHIFT);
0298     pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
0299 
0300     pgto_base &= ~PAGE_MASK;
0301     pgfrom_base &= ~PAGE_MASK;
0302 
0303     do {
0304         /* Are any pointers crossing a page boundary? */
0305         if (pgto_base == 0) {
0306             pgto_base = PAGE_SIZE;
0307             pgto--;
0308         }
0309         if (pgfrom_base == 0) {
0310             pgfrom_base = PAGE_SIZE;
0311             pgfrom--;
0312         }
0313 
0314         copy = len;
0315         if (copy > pgto_base)
0316             copy = pgto_base;
0317         if (copy > pgfrom_base)
0318             copy = pgfrom_base;
0319         pgto_base -= copy;
0320         pgfrom_base -= copy;
0321 
0322         vto = kmap_atomic(*pgto);
0323         if (*pgto != *pgfrom) {
0324             vfrom = kmap_atomic(*pgfrom);
0325             memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
0326             kunmap_atomic(vfrom);
0327         } else
0328             memmove(vto + pgto_base, vto + pgfrom_base, copy);
0329         flush_dcache_page(*pgto);
0330         kunmap_atomic(vto);
0331 
0332     } while ((len -= copy) != 0);
0333 }
0334 
0335 /**
0336  * _copy_to_pages
0337  * @pages: array of pages
0338  * @pgbase: page vector address of destination
0339  * @p: pointer to source data
0340  * @len: length
0341  *
0342  * Copies data from an arbitrary memory location into an array of pages
0343  * The copy is assumed to be non-overlapping.
0344  */
0345 static void
0346 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
0347 {
0348     struct page **pgto;
0349     char *vto;
0350     size_t copy;
0351 
0352     if (!len)
0353         return;
0354 
0355     pgto = pages + (pgbase >> PAGE_SHIFT);
0356     pgbase &= ~PAGE_MASK;
0357 
0358     for (;;) {
0359         copy = PAGE_SIZE - pgbase;
0360         if (copy > len)
0361             copy = len;
0362 
0363         vto = kmap_atomic(*pgto);
0364         memcpy(vto + pgbase, p, copy);
0365         kunmap_atomic(vto);
0366 
0367         len -= copy;
0368         if (len == 0)
0369             break;
0370 
0371         pgbase += copy;
0372         if (pgbase == PAGE_SIZE) {
0373             flush_dcache_page(*pgto);
0374             pgbase = 0;
0375             pgto++;
0376         }
0377         p += copy;
0378     }
0379     flush_dcache_page(*pgto);
0380 }
0381 
0382 /**
0383  * _copy_from_pages
0384  * @p: pointer to destination
0385  * @pages: array of pages
0386  * @pgbase: offset of source data
0387  * @len: length
0388  *
0389  * Copies data into an arbitrary memory location from an array of pages
0390  * The copy is assumed to be non-overlapping.
0391  */
0392 void
0393 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
0394 {
0395     struct page **pgfrom;
0396     char *vfrom;
0397     size_t copy;
0398 
0399     if (!len)
0400         return;
0401 
0402     pgfrom = pages + (pgbase >> PAGE_SHIFT);
0403     pgbase &= ~PAGE_MASK;
0404 
0405     do {
0406         copy = PAGE_SIZE - pgbase;
0407         if (copy > len)
0408             copy = len;
0409 
0410         vfrom = kmap_atomic(*pgfrom);
0411         memcpy(p, vfrom + pgbase, copy);
0412         kunmap_atomic(vfrom);
0413 
0414         pgbase += copy;
0415         if (pgbase == PAGE_SIZE) {
0416             pgbase = 0;
0417             pgfrom++;
0418         }
0419         p += copy;
0420 
0421     } while ((len -= copy) != 0);
0422 }
0423 EXPORT_SYMBOL_GPL(_copy_from_pages);
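
/*
 * Editorial sketch: pulling a small object out of the page section of an
 * xdr_buf. The source address follows the linear convention above, so a
 * page-relative offset must have buf->page_base added to it.
 */
static void example_read_from_pages(const struct xdr_buf *buf,
                                    unsigned int offset, char *dest,
                                    size_t len)
{
    _copy_from_pages(dest, buf->pages, buf->page_base + offset, len);
}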
0424 
0425 static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
0426                  unsigned int len)
0427 {
0428     if (base >= iov->iov_len)
0429         return;
0430     if (len > iov->iov_len - base)
0431         len = iov->iov_len - base;
0432     memset(iov->iov_base + base, 0, len);
0433 }
0434 
0435 /**
0436  * xdr_buf_pages_zero
0437  * @buf: xdr_buf
0438  * @pgbase: beginning offset
0439  * @len: length
0440  */
0441 static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
0442                    unsigned int len)
0443 {
0444     struct page **pages = buf->pages;
0445     struct page **page;
0446     char *vpage;
0447     unsigned int zero;
0448 
0449     if (!len)
0450         return;
0451     if (pgbase >= buf->page_len) {
0452         xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
0453         return;
0454     }
0455     if (pgbase + len > buf->page_len) {
0456         xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
0457         len = buf->page_len - pgbase;
0458     }
0459 
0460     pgbase += buf->page_base;
0461 
0462     page = pages + (pgbase >> PAGE_SHIFT);
0463     pgbase &= ~PAGE_MASK;
0464 
0465     do {
0466         zero = PAGE_SIZE - pgbase;
0467         if (zero > len)
0468             zero = len;
0469 
0470         vpage = kmap_atomic(*page);
0471         memset(vpage + pgbase, 0, zero);
0472         kunmap_atomic(vpage);
0473 
0474         flush_dcache_page(*page);
0475         pgbase = 0;
0476         page++;
0477 
0478     } while ((len -= zero) != 0);
0479 }
0480 
0481 static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
0482                           unsigned int buflen, gfp_t gfp)
0483 {
0484     unsigned int i, npages, pagelen;
0485 
0486     if (!(buf->flags & XDRBUF_SPARSE_PAGES))
0487         return buflen;
0488     if (buflen <= buf->head->iov_len)
0489         return buflen;
0490     pagelen = buflen - buf->head->iov_len;
0491     if (pagelen > buf->page_len)
0492         pagelen = buf->page_len;
0493     npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
0494     for (i = 0; i < npages; i++) {
0495         if (buf->pages[i])
0496             continue;
0497         buf->pages[i] = alloc_page(gfp);
0498         if (likely(buf->pages[i]))
0499             continue;
0500         buflen -= pagelen;
0501         pagelen = i << PAGE_SHIFT;
0502         if (pagelen > buf->page_base)
0503             buflen += pagelen - buf->page_base;
0504         break;
0505     }
0506     return buflen;
0507 }
0508 
0509 static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
0510 {
0511     struct kvec *head = buf->head;
0512     struct kvec *tail = buf->tail;
0513     unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
0514     unsigned int free_space, newlen;
0515 
0516     if (sum > buf->len) {
0517         free_space = min_t(unsigned int, sum - buf->len, len);
0518         newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
0519                            GFP_KERNEL);
0520         free_space = newlen - buf->len;
0521         buf->len = newlen;
0522         len -= free_space;
0523         if (!len)
0524             return;
0525     }
0526 
0527     if (buf->buflen > sum) {
0528         /* Expand the tail buffer */
0529         free_space = min_t(unsigned int, buf->buflen - sum, len);
0530         tail->iov_len += free_space;
0531         buf->len += free_space;
0532     }
0533 }
0534 
0535 static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
0536                     unsigned int base, unsigned int len,
0537                     unsigned int shift)
0538 {
0539     const struct kvec *tail = buf->tail;
0540     unsigned int to = base + shift;
0541 
0542     if (to >= tail->iov_len)
0543         return;
0544     if (len + to > tail->iov_len)
0545         len = tail->iov_len - to;
0546     memmove(tail->iov_base + to, tail->iov_base + base, len);
0547 }
0548 
0549 static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
0550                      unsigned int base, unsigned int len,
0551                      unsigned int shift)
0552 {
0553     const struct kvec *tail = buf->tail;
0554     unsigned int to = base + shift;
0555     unsigned int pglen = 0;
0556     unsigned int talen = 0, tato = 0;
0557 
0558     if (base >= buf->page_len)
0559         return;
0560     if (len > buf->page_len - base)
0561         len = buf->page_len - base;
0562     if (to >= buf->page_len) {
0563         tato = to - buf->page_len;
0564         if (tail->iov_len >= len + tato)
0565             talen = len;
0566         else if (tail->iov_len > tato)
0567             talen = tail->iov_len - tato;
0568     } else if (len + to >= buf->page_len) {
0569         pglen = buf->page_len - to;
0570         talen = len - pglen;
0571         if (talen > tail->iov_len)
0572             talen = tail->iov_len;
0573     } else
0574         pglen = len;
0575 
0576     _copy_from_pages(tail->iov_base + tato, buf->pages,
0577              buf->page_base + base + pglen, talen);
0578     _shift_data_right_pages(buf->pages, buf->page_base + to,
0579                 buf->page_base + base, pglen);
0580 }
0581 
0582 static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
0583                     unsigned int base, unsigned int len,
0584                     unsigned int shift)
0585 {
0586     const struct kvec *head = buf->head;
0587     const struct kvec *tail = buf->tail;
0588     unsigned int to = base + shift;
0589     unsigned int pglen = 0, pgto = 0;
0590     unsigned int talen = 0, tato = 0;
0591 
0592     if (base >= head->iov_len)
0593         return;
0594     if (len > head->iov_len - base)
0595         len = head->iov_len - base;
0596     if (to >= buf->page_len + head->iov_len) {
0597         tato = to - buf->page_len - head->iov_len;
0598         talen = len;
0599     } else if (to >= head->iov_len) {
0600         pgto = to - head->iov_len;
0601         pglen = len;
0602         if (pgto + pglen > buf->page_len) {
0603             talen = pgto + pglen - buf->page_len;
0604             pglen -= talen;
0605         }
0606     } else {
0607         pglen = len - (head->iov_len - to);
0608         if (pglen > buf->page_len) {
0609             talen = pglen - buf->page_len;
0610             pglen = buf->page_len;
0611         }
0612     }
0613 
0614     len -= talen;
0615     base += len;
0616     if (talen + tato > tail->iov_len)
0617         talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
0618     memcpy(tail->iov_base + tato, head->iov_base + base, talen);
0619 
0620     len -= pglen;
0621     base -= pglen;
0622     _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
0623                pglen);
0624 
0625     base -= len;
0626     memmove(head->iov_base + to, head->iov_base + base, len);
0627 }
0628 
0629 static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
0630                      unsigned int base, unsigned int len,
0631                      unsigned int shift)
0632 {
0633     const struct kvec *tail = buf->tail;
0634 
0635     if (base >= tail->iov_len || !shift || !len)
0636         return;
0637     xdr_buf_tail_copy_right(buf, base, len, shift);
0638 }
0639 
0640 static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
0641                       unsigned int base, unsigned int len,
0642                       unsigned int shift)
0643 {
0644     if (!shift || !len)
0645         return;
0646     if (base >= buf->page_len) {
0647         xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
0648         return;
0649     }
0650     if (base + len > buf->page_len)
0651         xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
0652                      shift);
0653     xdr_buf_pages_copy_right(buf, base, len, shift);
0654 }
0655 
0656 static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
0657                      unsigned int base, unsigned int len,
0658                      unsigned int shift)
0659 {
0660     const struct kvec *head = buf->head;
0661 
0662     if (!shift)
0663         return;
0664     if (base >= head->iov_len) {
0665         xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
0666                       shift);
0667         return;
0668     }
0669     if (base + len > head->iov_len)
0670         xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
0671                       shift);
0672     xdr_buf_head_copy_right(buf, base, len, shift);
0673 }
0674 
0675 static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
0676                    unsigned int len, unsigned int shift)
0677 {
0678     const struct kvec *tail = buf->tail;
0679 
0680     if (base >= tail->iov_len)
0681         return;
0682     if (len > tail->iov_len - base)
0683         len = tail->iov_len - base;
0684     /* Shift data into head */
0685     if (shift > buf->page_len + base) {
0686         const struct kvec *head = buf->head;
0687         unsigned int hdto =
0688             head->iov_len + buf->page_len + base - shift;
0689         unsigned int hdlen = len;
0690 
0691         if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
0692                   "SUNRPC: Misaligned data.\n"))
0693             return;
0694         if (hdto + hdlen > head->iov_len)
0695             hdlen = head->iov_len - hdto;
0696         memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
0697         base += hdlen;
0698         len -= hdlen;
0699         if (!len)
0700             return;
0701     }
0702     /* Shift data into pages */
0703     if (shift > base) {
0704         unsigned int pgto = buf->page_len + base - shift;
0705         unsigned int pglen = len;
0706 
0707         if (pgto + pglen > buf->page_len)
0708             pglen = buf->page_len - pgto;
0709         _copy_to_pages(buf->pages, buf->page_base + pgto,
0710                    tail->iov_base + base, pglen);
0711         base += pglen;
0712         len -= pglen;
0713         if (!len)
0714             return;
0715     }
0716     memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
0717 }
0718 
0719 static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
0720                     unsigned int base, unsigned int len,
0721                     unsigned int shift)
0722 {
0723     unsigned int pgto;
0724 
0725     if (base >= buf->page_len)
0726         return;
0727     if (len > buf->page_len - base)
0728         len = buf->page_len - base;
0729     /* Shift data into head */
0730     if (shift > base) {
0731         const struct kvec *head = buf->head;
0732         unsigned int hdto = head->iov_len + base - shift;
0733         unsigned int hdlen = len;
0734 
0735         if (WARN_ONCE(shift > head->iov_len + base,
0736                   "SUNRPC: Misaligned data.\n"))
0737             return;
0738         if (hdto + hdlen > head->iov_len)
0739             hdlen = head->iov_len - hdto;
0740         _copy_from_pages(head->iov_base + hdto, buf->pages,
0741                  buf->page_base + base, hdlen);
0742         base += hdlen;
0743         len -= hdlen;
0744         if (!len)
0745             return;
0746     }
0747     pgto = base - shift;
0748     _shift_data_left_pages(buf->pages, buf->page_base + pgto,
0749                    buf->page_base + base, len);
0750 }
0751 
0752 static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
0753                     unsigned int base, unsigned int len,
0754                     unsigned int shift)
0755 {
0756     if (!shift || !len)
0757         return;
0758     xdr_buf_tail_copy_left(buf, base, len, shift);
0759 }
0760 
0761 static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
0762                      unsigned int base, unsigned int len,
0763                      unsigned int shift)
0764 {
0765     if (!shift || !len)
0766         return;
0767     if (base >= buf->page_len) {
0768         xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
0769         return;
0770     }
0771     xdr_buf_pages_copy_left(buf, base, len, shift);
0772     len += base;
0773     if (len <= buf->page_len)
0774         return;
0775     xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
0776 }
0777 
0778 static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
0779                     unsigned int base, unsigned int len,
0780                     unsigned int shift)
0781 {
0782     const struct kvec *head = buf->head;
0783     unsigned int bytes;
0784 
0785     if (!shift || !len)
0786         return;
0787 
0788     if (shift > base) {
0789         bytes = (shift - base);
0790         if (bytes >= len)
0791             return;
0792         base += bytes;
0793         len -= bytes;
0794     }
0795 
0796     if (base < head->iov_len) {
0797         bytes = min_t(unsigned int, len, head->iov_len - base);
0798         memmove(head->iov_base + (base - shift),
0799             head->iov_base + base, bytes);
0800         base += bytes;
0801         len -= bytes;
0802     }
0803     xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
0804 }
0805 
0806 /**
0807  * xdr_shrink_bufhead
0808  * @buf: xdr_buf
0809  * @len: new length of buf->head[0]
0810  *
0811  * Shrinks XDR buffer's header kvec buf->head[0], setting it to
0812  * 'len' bytes. The extra data is not lost, but is instead
0813  * moved into the inlined pages and/or the tail.
0814  */
0815 static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
0816 {
0817     struct kvec *head = buf->head;
0818     unsigned int shift, buflen = max(buf->len, len);
0819 
0820     WARN_ON_ONCE(len > head->iov_len);
0821     if (head->iov_len > buflen) {
0822         buf->buflen -= head->iov_len - buflen;
0823         head->iov_len = buflen;
0824     }
0825     if (len >= head->iov_len)
0826         return 0;
0827     shift = head->iov_len - len;
0828     xdr_buf_try_expand(buf, shift);
0829     xdr_buf_head_shift_right(buf, len, buflen - len, shift);
0830     head->iov_len = len;
0831     buf->buflen -= shift;
0832     buf->len -= shift;
0833     return shift;
0834 }
0835 
0836 /**
0837  * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
0838  * @buf: xdr_buf
0839  * @len: new page buffer length
0840  *
0841  * The extra data is not lost, but is instead moved into buf->tail.
0842  * Returns the actual number of bytes moved.
0843  */
0844 static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
0845 {
0846     unsigned int shift, buflen = buf->len - buf->head->iov_len;
0847 
0848     WARN_ON_ONCE(len > buf->page_len);
0849     if (buf->head->iov_len >= buf->len || len > buflen)
0850         buflen = len;
0851     if (buf->page_len > buflen) {
0852         buf->buflen -= buf->page_len - buflen;
0853         buf->page_len = buflen;
0854     }
0855     if (len >= buf->page_len)
0856         return 0;
0857     shift = buf->page_len - len;
0858     xdr_buf_try_expand(buf, shift);
0859     xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
0860     buf->page_len = len;
0861     buf->len -= shift;
0862     buf->buflen -= shift;
0863     return shift;
0864 }
0865 
0866 void
0867 xdr_shift_buf(struct xdr_buf *buf, size_t len)
0868 {
0869     xdr_shrink_bufhead(buf, buf->head->iov_len - len);
0870 }
0871 EXPORT_SYMBOL_GPL(xdr_shift_buf);
0872 
0873 /**
0874  * xdr_stream_pos - Return the current offset from the start of the xdr_stream
0875  * @xdr: pointer to struct xdr_stream
0876  */
0877 unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
0878 {
0879     return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
0880 }
0881 EXPORT_SYMBOL_GPL(xdr_stream_pos);
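
/*
 * Editorial worked example: the position is derived from the words still
 * unread. For a 100-byte buffer with xdr->nwords == 20, the stream sits
 * at (XDR_QUADLEN(100) - 20) << 2 = (25 - 20) * 4 = 20 bytes from the
 * start.
 */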
0882 
0883 static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
0884 {
0885     unsigned int blen = xdr->buf->len;
0886 
0887     xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
0888 }
0889 
0890 static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
0891 {
0892     xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
0893 }
0894 
0895 /**
0896  * xdr_page_pos - Return the current offset from the start of the xdr pages
0897  * @xdr: pointer to struct xdr_stream
0898  */
0899 unsigned int xdr_page_pos(const struct xdr_stream *xdr)
0900 {
0901     unsigned int pos = xdr_stream_pos(xdr);
0902 
0903     WARN_ON(pos < xdr->buf->head[0].iov_len);
0904     return pos - xdr->buf->head[0].iov_len;
0905 }
0906 EXPORT_SYMBOL_GPL(xdr_page_pos);
0907 
0908 /**
0909  * xdr_init_encode - Initialize a struct xdr_stream for sending data.
0910  * @xdr: pointer to xdr_stream struct
0911  * @buf: pointer to XDR buffer in which to encode data
0912  * @p: current pointer inside XDR buffer
0913  * @rqst: pointer to controlling rpc_rqst, for debugging
0914  *
0915  * Note: at the moment the RPC client only passes the length of our
0916  *   scratch buffer in the xdr_buf's header kvec. Previously this
0917  *   meant we needed to call xdr_adjust_iovec() after encoding the
0918  *   data. With the new scheme, the xdr_stream manages the details
0919  *   of the buffer length, and takes care of adjusting the kvec
0920  *   length for us.
0921  */
0922 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
0923              struct rpc_rqst *rqst)
0924 {
0925     struct kvec *iov = buf->head;
0926     int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
0927 
0928     xdr_reset_scratch_buffer(xdr);
0929     BUG_ON(scratch_len < 0);
0930     xdr->buf = buf;
0931     xdr->iov = iov;
0932     xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
0933     xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
0934     BUG_ON(iov->iov_len > scratch_len);
0935 
0936     if (p != xdr->p && p != NULL) {
0937         size_t len;
0938 
0939         BUG_ON(p < xdr->p || p > xdr->end);
0940         len = (char *)p - (char *)xdr->p;
0941         xdr->p = p;
0942         buf->len += len;
0943         iov->iov_len += len;
0944     }
0945     xdr->rqst = rqst;
0946 }
0947 EXPORT_SYMBOL_GPL(xdr_init_encode);
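
/*
 * Editorial sketch: a minimal encode sequence over a caller-prepared
 * xdr_buf. Per the kernel-doc above, the rpc_rqst argument is for
 * debugging, so NULL is acceptable here.
 */
static int example_encode_u32(struct xdr_buf *buf, u32 value)
{
    struct xdr_stream xdr;
    __be32 *p;

    xdr_init_encode(&xdr, buf, NULL, NULL);
    p = xdr_reserve_space(&xdr, sizeof(value));
    if (!p)
        return -EMSGSIZE;
    *p = cpu_to_be32(value);
    return 0;
}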
0948 
0949 /**
0950  * __xdr_commit_encode - Ensure all data is written to buffer
0951  * @xdr: pointer to xdr_stream
0952  *
0953  * We handle encoding across page boundaries by giving the caller a
0954  * temporary location to write to, then later copying the data into
0955  * place; xdr_commit_encode does that copying.
0956  *
0957  * Normally the caller doesn't need to call this directly, as the
0958  * following xdr_reserve_space will do it.  But an explicit call may be
0959  * required at the end of encoding, or any other time when the xdr_buf
0960  * data might be read.
0961  */
0962 void __xdr_commit_encode(struct xdr_stream *xdr)
0963 {
0964     size_t shift = xdr->scratch.iov_len;
0965     void *page;
0966 
0967     page = page_address(*xdr->page_ptr);
0968     memcpy(xdr->scratch.iov_base, page, shift);
0969     memmove(page, page + shift, (void *)xdr->p - page);
0970     xdr_reset_scratch_buffer(xdr);
0971 }
0972 EXPORT_SYMBOL_GPL(__xdr_commit_encode);
0973 
0974 /*
0975  * The buffer space to be reserved crosses the boundary between
0976  * xdr->buf->head and xdr->buf->pages, or between two pages
0977  * in xdr->buf->pages.
0978  */
0979 static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
0980                            size_t nbytes)
0981 {
0982     int space_left;
0983     int frag1bytes, frag2bytes;
0984     void *p;
0985 
0986     if (nbytes > PAGE_SIZE)
0987         goto out_overflow; /* Bigger buffers require special handling */
0988     if (xdr->buf->len + nbytes > xdr->buf->buflen)
0989         goto out_overflow; /* Sorry, we're totally out of space */
0990     frag1bytes = (xdr->end - xdr->p) << 2;
0991     frag2bytes = nbytes - frag1bytes;
0992     if (xdr->iov)
0993         xdr->iov->iov_len += frag1bytes;
0994     else
0995         xdr->buf->page_len += frag1bytes;
0996     xdr->page_ptr++;
0997     xdr->iov = NULL;
0998 
0999     /*
1000      * If the last encode didn't end exactly on a page boundary, the
1001      * next one will straddle boundaries.  Encode into the next
1002      * page, then copy it back later in xdr_commit_encode.  We use
1003      * the "scratch" iov to track any temporarily unused fragment of
1004      * space at the end of the previous buffer:
1005      */
1006     xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
1007 
1008     /*
1009      * xdr->p is where the next encode will start after
1010      * xdr_commit_encode() has shifted this one back:
1011      */
1012     p = page_address(*xdr->page_ptr);
1013     xdr->p = p + frag2bytes;
1014     space_left = xdr->buf->buflen - xdr->buf->len;
1015     if (space_left - frag1bytes >= PAGE_SIZE)
1016         xdr->end = p + PAGE_SIZE;
1017     else
1018         xdr->end = p + space_left - frag1bytes;
1019 
1020     xdr->buf->page_len += frag2bytes;
1021     xdr->buf->len += nbytes;
1022     return p;
1023 out_overflow:
1024     trace_rpc_xdr_overflow(xdr, nbytes);
1025     return NULL;
1026 }
1027 
1028 /**
1029  * xdr_reserve_space - Reserve buffer space for sending
1030  * @xdr: pointer to xdr_stream
1031  * @nbytes: number of bytes to reserve
1032  *
1033  * Checks that we have enough buffer space to encode 'nbytes' more
1034  * bytes of data. If so, update the total xdr_buf length, and
1035  * adjust the length of the current kvec.
1036  */
1037 __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
1038 {
1039     __be32 *p = xdr->p;
1040     __be32 *q;
1041 
1042     xdr_commit_encode(xdr);
1043     /* align nbytes on the next 32-bit boundary */
1044     nbytes += 3;
1045     nbytes &= ~3;
1046     q = p + (nbytes >> 2);
1047     if (unlikely(q > xdr->end || q < p))
1048         return xdr_get_next_encode_buffer(xdr, nbytes);
1049     xdr->p = q;
1050     if (xdr->iov)
1051         xdr->iov->iov_len += nbytes;
1052     else
1053         xdr->buf->page_len += nbytes;
1054     xdr->buf->len += nbytes;
1055     return p;
1056 }
1057 EXPORT_SYMBOL_GPL(xdr_reserve_space);
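
/*
 * Editorial worked example: nbytes is rounded up to a quad boundary
 * before the cursor moves, so reserving 5 bytes advances xdr->p by
 * (5 + 3) & ~3 = 8 bytes; the trailing 3 bytes are XDR pad that the
 * caller must write as zeroes (the opaque encoders above do exactly
 * that).
 */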
1058 
1059 
1060 /**
1061  * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
1062  * @xdr: pointer to xdr_stream
1063  * @vec: pointer to a kvec array
1064  * @nbytes: number of bytes to reserve
1065  *
1066  * Reserves enough buffer space to encode 'nbytes' of data and stores the
1067  * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
1068  * determined based on the number of bytes remaining in the current page to
1069  * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
1070  */
1071 int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
1072 {
1073     int thislen;
1074     int v = 0;
1075     __be32 *p;
1076 
1077     /*
1078      * svcrdma requires every READ payload to start somewhere
1079      * in xdr->pages.
1080      */
1081     if (xdr->iov == xdr->buf->head) {
1082         xdr->iov = NULL;
1083         xdr->end = xdr->p;
1084     }
1085 
1086     while (nbytes) {
1087         thislen = xdr->buf->page_len % PAGE_SIZE;
1088         thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
1089 
1090         p = xdr_reserve_space(xdr, thislen);
1091         if (!p)
1092             return -EIO;
1093 
1094         vec[v].iov_base = p;
1095         vec[v].iov_len = thislen;
1096         v++;
1097         nbytes -= thislen;
1098     }
1099 
1100     return v;
1101 }
1102 EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
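
/*
 * Editorial worked example: each kvec entry stops at a page boundary, so
 * when the page section starts page-aligned, a 6000-byte reservation on
 * 4096-byte pages comes back as two entries (4096 + 1904) rather than
 * one entry straddling the boundary; the return value would be 2.
 */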
1103 
1104 /**
1105  * xdr_truncate_encode - truncate an encode buffer
1106  * @xdr: pointer to xdr_stream
1107  * @len: new length of buffer
1108  *
1109  * Truncates the xdr stream, so that xdr->buf->len == len,
1110  * and xdr->p points at offset len from the start of the buffer, and
1111  * head, tail, and page lengths are adjusted to correspond.
1112  *
1113  * If this means moving xdr->p to a different buffer, we assume that
1114  * the end pointer should be set to the end of the current page,
1115  * except in the case of the head buffer when we assume the head
1116  * buffer's current length represents the end of the available buffer.
1117  *
1118  * This is *not* safe to use on a buffer that already has inlined page
1119  * cache pages (as in a zero-copy server read reply), except for the
1120  * simple case of truncating from one position in the tail to another.
1121  *
1122  */
1123 void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
1124 {
1125     struct xdr_buf *buf = xdr->buf;
1126     struct kvec *head = buf->head;
1127     struct kvec *tail = buf->tail;
1128     int fraglen;
1129     int new;
1130 
1131     if (len > buf->len) {
1132         WARN_ON_ONCE(1);
1133         return;
1134     }
1135     xdr_commit_encode(xdr);
1136 
1137     fraglen = min_t(int, buf->len - len, tail->iov_len);
1138     tail->iov_len -= fraglen;
1139     buf->len -= fraglen;
1140     if (tail->iov_len) {
1141         xdr->p = tail->iov_base + tail->iov_len;
1142         WARN_ON_ONCE(!xdr->end);
1143         WARN_ON_ONCE(!xdr->iov);
1144         return;
1145     }
1146     WARN_ON_ONCE(fraglen);
1147     fraglen = min_t(int, buf->len - len, buf->page_len);
1148     buf->page_len -= fraglen;
1149     buf->len -= fraglen;
1150 
1151     new = buf->page_base + buf->page_len;
1152 
1153     xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
1154 
1155     if (buf->page_len) {
1156         xdr->p = page_address(*xdr->page_ptr);
1157         xdr->end = (void *)xdr->p + PAGE_SIZE;
1158         xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
1159         WARN_ON_ONCE(xdr->iov);
1160         return;
1161     }
1162     if (fraglen)
1163         xdr->end = head->iov_base + head->iov_len;
1164     /* (otherwise assume xdr->end is already set) */
1165     xdr->page_ptr--;
1166     head->iov_len = len;
1167     buf->len = len;
1168     xdr->p = head->iov_base + head->iov_len;
1169     xdr->iov = buf->head;
1170 }
1171 EXPORT_SYMBOL(xdr_truncate_encode);
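
/*
 * Editorial sketch: the usual server-side pattern is to note the stream
 * position, attempt a multi-part encode, and roll back on failure. This
 * is the simple truncate-within-one-buffer case the kernel-doc above
 * permits.
 */
static int example_encode_pair(struct xdr_stream *xdr, u32 a, u32 b)
{
    unsigned int start = xdr->buf->len;
    __be32 *p;

    p = xdr_reserve_space(xdr, sizeof(a));
    if (!p)
        return -EMSGSIZE;
    *p = cpu_to_be32(a);

    p = xdr_reserve_space(xdr, sizeof(b));
    if (!p) {
        /* undo the first word so the stream stays consistent */
        xdr_truncate_encode(xdr, start);
        return -EMSGSIZE;
    }
    *p = cpu_to_be32(b);
    return 0;
}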
1172 
1173 /**
1174  * xdr_restrict_buflen - decrease available buffer space
1175  * @xdr: pointer to xdr_stream
1176  * @newbuflen: new maximum number of bytes available
1177  *
1178  * Adjust our idea of how much space is available in the buffer.
1179  * If we've already used too much space in the buffer, returns -1.
1180  * If the available space is already smaller than newbuflen, returns 0
1181  * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
1182  * and ensures xdr->end is set at most offset newbuflen from the start
1183  * of the buffer.
1184  */
1185 int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
1186 {
1187     struct xdr_buf *buf = xdr->buf;
1188     int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
1189     int end_offset = buf->len + left_in_this_buf;
1190 
1191     if (newbuflen < 0 || newbuflen < buf->len)
1192         return -1;
1193     if (newbuflen > buf->buflen)
1194         return 0;
1195     if (newbuflen < end_offset)
1196         xdr->end = (void *)xdr->end + newbuflen - end_offset;
1197     buf->buflen = newbuflen;
1198     return 0;
1199 }
1200 EXPORT_SYMBOL(xdr_restrict_buflen);
1201 
1202 /**
1203  * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
1204  * @xdr: pointer to xdr_stream
1205  * @pages: list of pages
1206  * @base: offset of first byte
1207  * @len: length of data in bytes
1208  *
1209  */
1210 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
1211          unsigned int len)
1212 {
1213     struct xdr_buf *buf = xdr->buf;
1214     struct kvec *iov = buf->tail;
1215     buf->pages = pages;
1216     buf->page_base = base;
1217     buf->page_len = len;
1218 
1219     iov->iov_base = (char *)xdr->p;
1220     iov->iov_len  = 0;
1221     xdr->iov = iov;
1222 
1223     if (len & 3) {
1224         unsigned int pad = 4 - (len & 3);
1225 
1226         BUG_ON(xdr->p >= xdr->end);
1227         iov->iov_base = (char *)xdr->p + (len & 3);
1228         iov->iov_len  += pad;
1229         len += pad;
1230         *xdr->p++ = 0;
1231     }
1232     buf->buflen += len;
1233     buf->len += len;
1234 }
1235 EXPORT_SYMBOL_GPL(xdr_write_pages);
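
/*
 * Editorial worked example: for len = 5, (len & 3) = 1 so pad = 3; the
 * zero word written at *xdr->p supplies the padding, and the tail kvec
 * starts one byte into that word so it covers exactly the 3 pad bytes,
 * keeping the stream quad-aligned.
 */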
1236 
1237 static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
1238                 unsigned int base, unsigned int len)
1239 {
1240     if (len > iov->iov_len)
1241         len = iov->iov_len;
1242     if (unlikely(base > len))
1243         base = len;
1244     xdr->p = (__be32*)(iov->iov_base + base);
1245     xdr->end = (__be32*)(iov->iov_base + len);
1246     xdr->iov = iov;
1247     xdr->page_ptr = NULL;
1248     return len - base;
1249 }
1250 
1251 static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
1252                       unsigned int base, unsigned int len)
1253 {
1254     struct xdr_buf *buf = xdr->buf;
1255 
1256     xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
1257     return xdr_set_iov(xdr, buf->tail, base, len);
1258 }
1259 
1260 static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
1261                       unsigned int base, unsigned int len)
1262 {
1263     unsigned int pgnr;
1264     unsigned int maxlen;
1265     unsigned int pgoff;
1266     unsigned int pgend;
1267     void *kaddr;
1268 
1269     maxlen = xdr->buf->page_len;
1270     if (base >= maxlen)
1271         return 0;
1272     else
1273         maxlen -= base;
1274     if (len > maxlen)
1275         len = maxlen;
1276 
1277     xdr_stream_page_set_pos(xdr, base);
1278     base += xdr->buf->page_base;
1279 
1280     pgnr = base >> PAGE_SHIFT;
1281     xdr->page_ptr = &xdr->buf->pages[pgnr];
1282     kaddr = page_address(*xdr->page_ptr);
1283 
1284     pgoff = base & ~PAGE_MASK;
1285     xdr->p = (__be32*)(kaddr + pgoff);
1286 
1287     pgend = pgoff + len;
1288     if (pgend > PAGE_SIZE)
1289         pgend = PAGE_SIZE;
1290     xdr->end = (__be32*)(kaddr + pgend);
1291     xdr->iov = NULL;
1292     return len;
1293 }
1294 
1295 static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
1296              unsigned int len)
1297 {
1298     if (xdr_set_page_base(xdr, base, len) == 0) {
1299         base -= xdr->buf->page_len;
1300         xdr_set_tail_base(xdr, base, len);
1301     }
1302 }
1303 
1304 static void xdr_set_next_page(struct xdr_stream *xdr)
1305 {
1306     unsigned int newbase;
1307 
1308     newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
1309     newbase -= xdr->buf->page_base;
1310     if (newbase < xdr->buf->page_len)
1311         xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
1312     else
1313         xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
1314 }
1315 
1316 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
1317 {
1318     if (xdr->page_ptr != NULL)
1319         xdr_set_next_page(xdr);
1320     else if (xdr->iov == xdr->buf->head)
1321         xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
1322     return xdr->p != xdr->end;
1323 }
1324 
1325 /**
1326  * xdr_init_decode - Initialize an xdr_stream for decoding data.
1327  * @xdr: pointer to xdr_stream struct
1328  * @buf: pointer to XDR buffer from which to decode data
1329  * @p: current pointer inside XDR buffer
1330  * @rqst: pointer to controlling rpc_rqst, for debugging
1331  */
1332 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
1333              struct rpc_rqst *rqst)
1334 {
1335     xdr->buf = buf;
1336     xdr_reset_scratch_buffer(xdr);
1337     xdr->nwords = XDR_QUADLEN(buf->len);
1338     if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
1339         xdr_set_page_base(xdr, 0, buf->len) == 0)
1340         xdr_set_iov(xdr, buf->tail, 0, buf->len);
1341     if (p != NULL && p > xdr->p && xdr->end >= p) {
1342         xdr->nwords -= p - xdr->p;
1343         xdr->p = p;
1344     }
1345     xdr->rqst = rqst;
1346 }
1347 EXPORT_SYMBOL_GPL(xdr_init_decode);
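
/*
 * Editorial sketch: the decode counterpart of the earlier encode example.
 * xdr_inline_decode() returns NULL once the buffer is exhausted.
 */
static int example_decode_u32(struct xdr_buf *buf, u32 *value)
{
    struct xdr_stream xdr;
    __be32 *p;

    xdr_init_decode(&xdr, buf, buf->head[0].iov_base, NULL);
    p = xdr_inline_decode(&xdr, sizeof(*value));
    if (!p)
        return -EBADMSG;
    *value = be32_to_cpu(*p);
    return 0;
}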
1348 
1349 /**
1350  * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
1351  * @xdr: pointer to xdr_stream struct
1352  * @buf: pointer to XDR buffer from which to decode data
1353  * @pages: list of pages to decode into
1354  * @len: length in bytes of buffer in pages
1355  */
1356 void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
1357                struct page **pages, unsigned int len)
1358 {
1359     memset(buf, 0, sizeof(*buf));
1360     buf->pages =  pages;
1361     buf->page_len =  len;
1362     buf->buflen =  len;
1363     buf->len = len;
1364     xdr_init_decode(xdr, buf, NULL, NULL);
1365 }
1366 EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
1367 
1368 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1369 {
1370     unsigned int nwords = XDR_QUADLEN(nbytes);
1371     __be32 *p = xdr->p;
1372     __be32 *q = p + nwords;
1373 
1374     if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
1375         return NULL;
1376     xdr->p = q;
1377     xdr->nwords -= nwords;
1378     return p;
1379 }
1380 
1381 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
1382 {
1383     __be32 *p;
1384     char *cpdest = xdr->scratch.iov_base;
1385     size_t cplen = (char *)xdr->end - (char *)xdr->p;
1386 
1387     if (nbytes > xdr->scratch.iov_len)
1388         goto out_overflow;
1389     p = __xdr_inline_decode(xdr, cplen);
1390     if (p == NULL)
1391         return NULL;
1392     memcpy(cpdest, p, cplen);
1393     if (!xdr_set_next_buffer(xdr))
1394         goto out_overflow;
1395     cpdest += cplen;
1396     nbytes -= cplen;
1397     p = __xdr_inline_decode(xdr, nbytes);
1398     if (p == NULL)
1399         return NULL;
1400     memcpy(cpdest, p, nbytes);
1401     return xdr->scratch.iov_base;
1402 out_overflow:
1403     trace_rpc_xdr_overflow(xdr, nbytes);
1404     return NULL;
1405 }
1406 
1407 /**
1408  * xdr_inline_decode - Retrieve XDR data to decode
1409  * @xdr: pointer to xdr_stream struct
1410  * @nbytes: number of bytes of data to decode
1411  *
1412  * Check if the input buffer is long enough to enable us to decode
1413  * 'nbytes' more bytes of data starting at the current position.
1414  * If so return the current pointer, then update the current
1415  * pointer position.
1416  */
1417 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
1418 {
1419     __be32 *p;
1420 
1421     if (unlikely(nbytes == 0))
1422         return xdr->p;
1423     if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1424         goto out_overflow;
1425     p = __xdr_inline_decode(xdr, nbytes);
1426     if (p != NULL)
1427         return p;
1428     return xdr_copy_to_scratch(xdr, nbytes);
1429 out_overflow:
1430     trace_rpc_xdr_overflow(xdr, nbytes);
1431     return NULL;
1432 }
1433 EXPORT_SYMBOL_GPL(xdr_inline_decode);
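
/*
 * Editorial sketch: when a decode may straddle a buffer boundary, the
 * caller lends the stream a scratch area first; xdr_copy_to_scratch()
 * above then reassembles the split object there. 'scratch' is assumed to
 * be at least 'nbytes' long.
 */
static __be32 *example_decode_across_boundary(struct xdr_stream *xdr,
                                              void *scratch, size_t buflen,
                                              size_t nbytes)
{
    xdr_set_scratch_buffer(xdr, scratch, buflen);
    return xdr_inline_decode(xdr, nbytes);
}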
1434 
1435 static void xdr_realign_pages(struct xdr_stream *xdr)
1436 {
1437     struct xdr_buf *buf = xdr->buf;
1438     struct kvec *iov = buf->head;
1439     unsigned int cur = xdr_stream_pos(xdr);
1440     unsigned int copied;
1441 
1442     /* Realign pages to current pointer position */
1443     if (iov->iov_len > cur) {
1444         copied = xdr_shrink_bufhead(buf, cur);
1445         trace_rpc_xdr_alignment(xdr, cur, copied);
1446         xdr_set_page(xdr, 0, buf->page_len);
1447     }
1448 }
1449 
1450 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
1451 {
1452     struct xdr_buf *buf = xdr->buf;
1453     unsigned int nwords = XDR_QUADLEN(len);
1454     unsigned int copied;
1455 
1456     if (xdr->nwords == 0)
1457         return 0;
1458 
1459     xdr_realign_pages(xdr);
1460     if (nwords > xdr->nwords) {
1461         nwords = xdr->nwords;
1462         len = nwords << 2;
1463     }
1464     if (buf->page_len <= len)
1465         len = buf->page_len;
1466     else if (nwords < xdr->nwords) {
1467         /* Truncate page data and move it into the tail */
1468         copied = xdr_shrink_pagelen(buf, len);
1469         trace_rpc_xdr_alignment(xdr, len, copied);
1470     }
1471     return len;
1472 }
1473 
1474 /**
1475  * xdr_read_pages - align page-based XDR data to current pointer position
1476  * @xdr: pointer to xdr_stream struct
1477  * @len: number of bytes of page data
1478  *
1479  * Moves data beyond the current pointer position from the XDR head[] buffer
1480  * into the page list. Any data that lies beyond current position + @len
1481  * bytes is moved into the XDR tail[]. The xdr_stream current position is
1482  * then advanced past that data to align to the next XDR object in the tail.
1483  *
1484  * Returns the number of XDR encoded bytes now contained in the pages
1485  */
1486 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
1487 {
1488     unsigned int nwords = XDR_QUADLEN(len);
1489     unsigned int base, end, pglen;
1490 
1491     pglen = xdr_align_pages(xdr, nwords << 2);
1492     if (pglen == 0)
1493         return 0;
1494 
1495     base = (nwords << 2) - pglen;
1496     end = xdr_stream_remaining(xdr) - pglen;
1497 
1498     xdr_set_tail_base(xdr, base, end);
1499     return len <= pglen ? len : pglen;
1500 }
1501 EXPORT_SYMBOL_GPL(xdr_read_pages);
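
/*
 * Editorial worked example: for len = 10 the quad-rounded length is
 * XDR_QUADLEN(10) << 2 = 12, so if the pages hold exactly 10 bytes the
 * tail is entered at base = 12 - 10 = 2, skipping the XDR pad bytes, and
 * the function returns 10.
 */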
1502 
1503 /**
1504  * xdr_set_pagelen - Sets the length of the XDR pages
1505  * @xdr: pointer to xdr_stream struct
1506  * @len: new length of the XDR page data
1507  *
1508  * Either grows or shrinks the length of the xdr pages by setting pagelen to
1509  * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
1510  * when growing any data beyond the current pointer is moved into the tail.
1513  */
1514 void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
1515 {
1516     struct xdr_buf *buf = xdr->buf;
1517     size_t remaining = xdr_stream_remaining(xdr);
1518     size_t base = 0;
1519 
1520     if (len < buf->page_len) {
1521         base = buf->page_len - len;
1522         xdr_shrink_pagelen(buf, len);
1523     } else {
1524         xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
1525                      buf->page_len, remaining);
1526         if (len > buf->page_len)
1527             xdr_buf_try_expand(buf, len - buf->page_len);
1528     }
1529     xdr_set_tail_base(xdr, base, remaining);
1530 }
1531 EXPORT_SYMBOL_GPL(xdr_set_pagelen);
1532 
1533 /**
1534  * xdr_enter_page - decode data from the XDR page
1535  * @xdr: pointer to xdr_stream struct
1536  * @len: number of bytes of page data
1537  *
1538  * Moves data beyond the current pointer position from the XDR head[] buffer
1539  * into the page list. Any data that lies beyond current position + "len"
1540  * bytes is moved into the XDR tail[]. The current pointer is then
1541  * repositioned at the beginning of the first XDR page.
1542  */
1543 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
1544 {
1545     len = xdr_align_pages(xdr, len);
1546     /*
1547      * Position current pointer at beginning of tail, and
1548      * set remaining message length.
1549      */
1550     if (len != 0)
1551         xdr_set_page_base(xdr, 0, len);
1552 }
1553 EXPORT_SYMBOL_GPL(xdr_enter_page);
1554 
1555 static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1556 
1557 void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
1558 {
1559     buf->head[0] = *iov;
1560     buf->tail[0] = empty_iov;
1561     buf->page_len = 0;
1562     buf->buflen = buf->len = iov->iov_len;
1563 }
1564 EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1565 
1566 /**
1567  * xdr_buf_subsegment - set subbuf to a portion of buf
1568  * @buf: an xdr buffer
1569  * @subbuf: the result buffer
1570  * @base: beginning of range in bytes
1571  * @len: length of range in bytes
1572  *
1573  * sets @subbuf to an xdr buffer representing the portion of @buf of
1574  * length @len starting at offset @base.
1575  *
1576  * @buf and @subbuf may be pointers to the same struct xdr_buf.
1577  *
1578  * Returns -1 if base or length are out of bounds.
1579  */
1580 int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
1581                unsigned int base, unsigned int len)
1582 {
1583     subbuf->buflen = subbuf->len = len;
1584     if (base < buf->head[0].iov_len) {
1585         subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1586         subbuf->head[0].iov_len = min_t(unsigned int, len,
1587                         buf->head[0].iov_len - base);
1588         len -= subbuf->head[0].iov_len;
1589         base = 0;
1590     } else {
1591         base -= buf->head[0].iov_len;
1592         subbuf->head[0].iov_base = buf->head[0].iov_base;
1593         subbuf->head[0].iov_len = 0;
1594     }
1595 
1596     if (base < buf->page_len) {
1597         subbuf->page_len = min(buf->page_len - base, len);
1598         base += buf->page_base;
1599         subbuf->page_base = base & ~PAGE_MASK;
1600         subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1601         len -= subbuf->page_len;
1602         base = 0;
1603     } else {
1604         base -= buf->page_len;
1605         subbuf->pages = buf->pages;
1606         subbuf->page_base = 0;
1607         subbuf->page_len = 0;
1608     }
1609 
1610     if (base < buf->tail[0].iov_len) {
1611         subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1612         subbuf->tail[0].iov_len = min_t(unsigned int, len,
1613                         buf->tail[0].iov_len - base);
1614         len -= subbuf->tail[0].iov_len;
1615         base = 0;
1616     } else {
1617         base -= buf->tail[0].iov_len;
1618         subbuf->tail[0].iov_base = buf->tail[0].iov_base;
1619         subbuf->tail[0].iov_len = 0;
1620     }
1621 
1622     if (base || len)
1623         return -1;
1624     return 0;
1625 }
1626 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
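
/*
 * Editorial sketch: carving out a view of a message's payload, e.g. to
 * checksum just that range. Offsets are relative to the start of @buf
 * and the view may span head, pages and tail.
 */
static int example_payload_view(const struct xdr_buf *msg,
                                struct xdr_buf *payload,
                                unsigned int offset, unsigned int len)
{
    return xdr_buf_subsegment(msg, payload, offset, len) ? -ERANGE : 0;
}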
1627 
1628 /**
1629  * xdr_stream_subsegment - set @subbuf to a portion of @xdr
1630  * @xdr: an xdr_stream set up for decoding
1631  * @subbuf: the result buffer
1632  * @nbytes: length of @xdr to extract, in bytes
1633  *
1634  * Sets up @subbuf to represent a portion of @xdr. The portion
1635  * starts at the current offset in @xdr, and extends for a length
1636  * of @nbytes. If this is successful, @xdr is advanced to the next
1637  * XDR data item following that portion.
1638  *
1639  * Return values:
1640  *   %true: @subbuf has been initialized, and @xdr has been advanced.
1641  *   %false: a bounds error has occurred
1642  */
1643 bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
1644                unsigned int nbytes)
1645 {
1646     unsigned int start = xdr_stream_pos(xdr);
1647     unsigned int remaining, len;
1648 
1649     /* Extract @subbuf and bounds-check the fn arguments */
1650     if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
1651         return false;
1652 
1653     /* Advance @xdr by @nbytes */
1654     for (remaining = nbytes; remaining;) {
1655         if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1656             return false;
1657 
1658         len = (char *)xdr->end - (char *)xdr->p;
1659         if (remaining <= len) {
1660             xdr->p = (__be32 *)((char *)xdr->p +
1661                     (remaining + xdr_pad_size(nbytes)));
1662             break;
1663         }
1664 
1665         xdr->p = (__be32 *)((char *)xdr->p + len);
1666         xdr->end = xdr->p;
1667         remaining -= len;
1668     }
1669 
1670     xdr_stream_set_pos(xdr, start + nbytes);
1671     return true;
1672 }
1673 EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
1674 
1675 /**
1676  * xdr_stream_move_subsegment - Move part of a stream to another position
1677  * @xdr: the source xdr_stream
1678  * @offset: the source offset of the segment
1679  * @target: the target offset of the segment
1680  * @length: the number of bytes to move
1681  *
1682  * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
1683  * anything already at @target. Returns @length, or zero on a bounds error.
1684  */
1685 unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
1686                     unsigned int target, unsigned int length)
1687 {
1688     struct xdr_buf buf;
1689     unsigned int shift;
1690 
1691     if (offset < target) {
1692         shift = target - offset;
1693         if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
1694             return 0;
1695         xdr_buf_head_shift_right(&buf, 0, length, shift);
1696     } else if (offset > target) {
1697         shift = offset - target;
1698         if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
1699             return 0;
1700         xdr_buf_head_shift_left(&buf, shift, length, shift);
1701     }
1702     return length;
1703 }
1704 EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
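
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Shift an already-encoded 8-byte item from @from to @to, for example
 * to open or close a gap while rewriting a reply; the bytes between
 * the two offsets are clobbered by the move.
 */
static bool example_move_item(struct xdr_stream *xdr,
			      unsigned int from, unsigned int to)
{
	return xdr_stream_move_subsegment(xdr, from, to, 8) == 8;
}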
1705 
1706 /**
1707  * xdr_stream_zero - zero out a portion of an xdr_stream
1708  * @xdr: an xdr_stream to zero out
1709  * @offset: the starting point in the stream
1710  * @length: the number of bytes to zero
1711  */
1712 unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
1713                  unsigned int length)
1714 {
1715     struct xdr_buf buf;
1716 
1717     if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
1718         return 0;
1719     if (buf.head[0].iov_len)
1720         xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
1721     if (buf.page_len > 0)
1722         xdr_buf_pages_zero(&buf, 0, buf.page_len);
1723     if (buf.tail[0].iov_len)
1724         xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
1725     return length;
1726 }
1727 EXPORT_SYMBOL_GPL(xdr_stream_zero);
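
/*
 * Editor's note: usage sketch, not part of the original file.
 * xdr_stream_zero() returns @length on success and zero when the
 * range falls outside the stream's buffer, so a caller can treat a
 * short return as a bounds error. The range may span the head, the
 * page array and the tail.
 */
static bool example_zero_hole(struct xdr_stream *xdr,
			      unsigned int offset, unsigned int length)
{
	return xdr_stream_zero(xdr, offset, length) == length;
}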
1728 
1729 /**
1730  * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1731  * @buf: buf to be trimmed
1732  * @len: number of bytes to reduce "buf" by
1733  *
1734  * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1735  * that it's possible that we'll trim less than that amount if the xdr_buf is
1736  * too small, or if (for instance) it's all in the head and the parser has
1737  * already read too far into it.
1738  */
1739 void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1740 {
1741     size_t cur;
1742     unsigned int trim = len;
1743 
1744     if (buf->tail[0].iov_len) {
1745         cur = min_t(size_t, buf->tail[0].iov_len, trim);
1746         buf->tail[0].iov_len -= cur;
1747         trim -= cur;
1748         if (!trim)
1749             goto fix_len;
1750     }
1751 
1752     if (buf->page_len) {
1753         cur = min_t(unsigned int, buf->page_len, trim);
1754         buf->page_len -= cur;
1755         trim -= cur;
1756         if (!trim)
1757             goto fix_len;
1758     }
1759 
1760     if (buf->head[0].iov_len) {
1761         cur = min_t(size_t, buf->head[0].iov_len, trim);
1762         buf->head[0].iov_len -= cur;
1763         trim -= cur;
1764     }
1765 fix_len:
1766     buf->len -= (len - trim);
1767 }
1768 EXPORT_SYMBOL_GPL(xdr_buf_trim);
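
/*
 * Editor's note: usage sketch, not part of the original file. A
 * typical caller (e.g. RPCSEC_GSS unwrap code) strips a trailing
 * checksum by fixing up the segment lengths; nothing is copied or
 * freed, and at most @miclen bytes are removed, tail first.
 */
static void example_strip_trailer(struct xdr_buf *buf, unsigned int miclen)
{
	xdr_buf_trim(buf, miclen);
}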
1769 
1770 static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
1771                       void *obj, unsigned int len)
1772 {
1773     unsigned int this_len;
1774 
1775     this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1776     memcpy(obj, subbuf->head[0].iov_base, this_len);
1777     len -= this_len;
1778     obj += this_len;
1779     this_len = min_t(unsigned int, len, subbuf->page_len);
1780     _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1781     len -= this_len;
1782     obj += this_len;
1783     this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1784     memcpy(obj, subbuf->tail[0].iov_base, this_len);
1785 }
1786 
1787 /* obj is assumed to point to allocated memory of size at least len: */
1788 int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
1789                 void *obj, unsigned int len)
1790 {
1791     struct xdr_buf subbuf;
1792     int status;
1793 
1794     status = xdr_buf_subsegment(buf, &subbuf, base, len);
1795     if (status != 0)
1796         return status;
1797     __read_bytes_from_xdr_buf(&subbuf, obj, len);
1798     return 0;
1799 }
1800 EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
1801 
1802 static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
1803                      void *obj, unsigned int len)
1804 {
1805     unsigned int this_len;
1806 
1807     this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1808     memcpy(subbuf->head[0].iov_base, obj, this_len);
1809     len -= this_len;
1810     obj += this_len;
1811     this_len = min_t(unsigned int, len, subbuf->page_len);
1812     _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1813     len -= this_len;
1814     obj += this_len;
1815     this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1816     memcpy(subbuf->tail[0].iov_base, obj, this_len);
1817 }
1818 
1819 /* obj is assumed to point to allocated memory of size at least len: */
1820 int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
1821                void *obj, unsigned int len)
1822 {
1823     struct xdr_buf subbuf;
1824     int status;
1825 
1826     status = xdr_buf_subsegment(buf, &subbuf, base, len);
1827     if (status != 0)
1828         return status;
1829     __write_bytes_to_xdr_buf(&subbuf, obj, len);
1830     return 0;
1831 }
1832 EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
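
/*
 * Editor's note: round-trip sketch, not part of the original file.
 * Patch a fixed-size blob into an xdr_buf and read it back; both
 * helpers fail (non-zero) if [base, base + len) falls outside @buf.
 * All names are hypothetical.
 */
static int example_roundtrip(const struct xdr_buf *buf, unsigned int base)
{
	u8 in[8] = { 0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3 };
	u8 out[8];
	int err;

	err = write_bytes_to_xdr_buf(buf, base, in, sizeof(in));
	if (err)
		return err;
	err = read_bytes_from_xdr_buf(buf, base, out, sizeof(out));
	if (err)
		return err;
	return memcmp(in, out, sizeof(in)) ? -EIO : 0;
}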
1833 
1834 int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
1835 {
1836     __be32  raw;
1837     int status;
1838 
1839     status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1840     if (status)
1841         return status;
1842     *obj = be32_to_cpu(raw);
1843     return 0;
1844 }
1845 EXPORT_SYMBOL_GPL(xdr_decode_word);
1846 
1847 int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
1848 {
1849     __be32  raw = cpu_to_be32(obj);
1850 
1851     return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1852 }
1853 EXPORT_SYMBOL_GPL(xdr_encode_word);
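
/*
 * Editor's note: usage sketch, not part of the original file. These
 * two helpers handle the cpu <-> big-endian conversion, so callers
 * work entirely in host byte order; @base is a byte offset into @buf.
 */
static int example_bump_counter(const struct xdr_buf *buf, unsigned int base)
{
	u32 val;
	int err;

	err = xdr_decode_word(buf, base, &val);
	if (err)
		return err;
	return xdr_encode_word(buf, base, val + 1);
}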
1854 
1855 /* Returns 0 on success, or else a negative error code. */
1856 static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
1857                 struct xdr_array2_desc *desc, int encode)
1858 {
1859     char *elem = NULL, *c;
1860     unsigned int copied = 0, todo, avail_here;
1861     struct page **ppages = NULL;
1862     int err;
1863 
1864     if (encode) {
1865         if (xdr_encode_word(buf, base, desc->array_len) != 0)
1866             return -EINVAL;
1867     } else {
1868         if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1869             desc->array_len > desc->array_maxlen ||
1870             (unsigned long) base + 4 + desc->array_len *
1871                     desc->elem_size > buf->len)
1872             return -EINVAL;
1873     }
1874     base += 4;
1875 
1876     if (!desc->xcode)
1877         return 0;
1878 
1879     todo = desc->array_len * desc->elem_size;
1880 
1881     /* process head */
1882     if (todo && base < buf->head->iov_len) {
1883         c = buf->head->iov_base + base;
1884         avail_here = min_t(unsigned int, todo,
1885                    buf->head->iov_len - base);
1886         todo -= avail_here;
1887 
1888         while (avail_here >= desc->elem_size) {
1889             err = desc->xcode(desc, c);
1890             if (err)
1891                 goto out;
1892             c += desc->elem_size;
1893             avail_here -= desc->elem_size;
1894         }
1895         if (avail_here) {
1896             if (!elem) {
1897                 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1898                 err = -ENOMEM;
1899                 if (!elem)
1900                     goto out;
1901             }
1902             if (encode) {
1903                 err = desc->xcode(desc, elem);
1904                 if (err)
1905                     goto out;
1906                 memcpy(c, elem, avail_here);
1907             } else
1908                 memcpy(elem, c, avail_here);
1909             copied = avail_here;
1910         }
1911         base = buf->head->iov_len;  /* align to start of pages */
1912     }
1913 
1914     /* process pages array */
1915     base -= buf->head->iov_len;
1916     if (todo && base < buf->page_len) {
1917         unsigned int avail_page;
1918 
1919         avail_here = min(todo, buf->page_len - base);
1920         todo -= avail_here;
1921 
1922         base += buf->page_base;
1923         ppages = buf->pages + (base >> PAGE_SHIFT);
1924         base &= ~PAGE_MASK;
1925         avail_page = min_t(unsigned int, PAGE_SIZE - base,
1926                     avail_here);
1927         c = kmap(*ppages) + base;
1928 
1929         while (avail_here) {
1930             avail_here -= avail_page;
1931             if (copied || avail_page < desc->elem_size) {
1932                 unsigned int l = min(avail_page,
1933                     desc->elem_size - copied);
1934                 if (!elem) {
1935                     elem = kmalloc(desc->elem_size,
1936                                GFP_KERNEL);
1937                     err = -ENOMEM;
1938                     if (!elem)
1939                         goto out;
1940                 }
1941                 if (encode) {
1942                     if (!copied) {
1943                         err = desc->xcode(desc, elem);
1944                         if (err)
1945                             goto out;
1946                     }
1947                     memcpy(c, elem + copied, l);
1948                     copied += l;
1949                     if (copied == desc->elem_size)
1950                         copied = 0;
1951                 } else {
1952                     memcpy(elem + copied, c, l);
1953                     copied += l;
1954                     if (copied == desc->elem_size) {
1955                         err = desc->xcode(desc, elem);
1956                         if (err)
1957                             goto out;
1958                         copied = 0;
1959                     }
1960                 }
1961                 avail_page -= l;
1962                 c += l;
1963             }
1964             while (avail_page >= desc->elem_size) {
1965                 err = desc->xcode(desc, c);
1966                 if (err)
1967                     goto out;
1968                 c += desc->elem_size;
1969                 avail_page -= desc->elem_size;
1970             }
1971             if (avail_page) {
1972                 unsigned int l = min(avail_page,
1973                         desc->elem_size - copied);
1974                 if (!elem) {
1975                     elem = kmalloc(desc->elem_size,
1976                                GFP_KERNEL);
1977                     err = -ENOMEM;
1978                     if (!elem)
1979                         goto out;
1980                 }
1981                 if (encode) {
1982                     if (!copied) {
1983                         err = desc->xcode(desc, elem);
1984                         if (err)
1985                             goto out;
1986                     }
1987                     memcpy(c, elem + copied, l);
1988                     copied += l;
1989                     if (copied == desc->elem_size)
1990                         copied = 0;
1991                 } else {
1992                     memcpy(elem + copied, c, l);
1993                     copied += l;
1994                     if (copied == desc->elem_size) {
1995                         err = desc->xcode(desc, elem);
1996                         if (err)
1997                             goto out;
1998                         copied = 0;
1999                     }
2000                 }
2001             }
2002             if (avail_here) {
2003                 kunmap(*ppages);
2004                 ppages++;
2005                 c = kmap(*ppages);
2006             }
2007 
2008             avail_page = min(avail_here,
2009                  (unsigned int) PAGE_SIZE);
2010         }
2011         base = buf->page_len;  /* align to start of tail */
2012     }
2013 
2014     /* process tail */
2015     base -= buf->page_len;
2016     if (todo) {
2017         c = buf->tail->iov_base + base;
2018         if (copied) {
2019             unsigned int l = desc->elem_size - copied;
2020 
2021             if (encode)
2022                 memcpy(c, elem + copied, l);
2023             else {
2024                 memcpy(elem + copied, c, l);
2025                 err = desc->xcode(desc, elem);
2026                 if (err)
2027                     goto out;
2028             }
2029             todo -= l;
2030             c += l;
2031         }
2032         while (todo) {
2033             err = desc->xcode(desc, c);
2034             if (err)
2035                 goto out;
2036             c += desc->elem_size;
2037             todo -= desc->elem_size;
2038         }
2039     }
2040     err = 0;
2041 
2042 out:
2043     kfree(elem);
2044     if (ppages)
2045         kunmap(*ppages);
2046     return err;
2047 }
2048 
2049 int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
2050               struct xdr_array2_desc *desc)
2051 {
2052     if (base >= buf->len)
2053         return -EINVAL;
2054 
2055     return xdr_xcode_array2(buf, base, desc, 0);
2056 }
2057 EXPORT_SYMBOL_GPL(xdr_decode_array2);
2058 
2059 int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
2060               struct xdr_array2_desc *desc)
2061 {
2062     if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
2063         buf->head->iov_len + buf->page_len + buf->tail->iov_len)
2064         return -EINVAL;
2065 
2066     return xdr_xcode_array2(buf, base, desc, 1);
2067 }
2068 EXPORT_SYMBOL_GPL(xdr_encode_array2);
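
/*
 * Editor's note: an illustrative sketch, not part of the original
 * file. The xcode callback is invoked once per array element with a
 * pointer to elem_size contiguous bytes; elements that straddle page
 * boundaries are bounced through a temporary buffer by the core code
 * above. All names below are hypothetical.
 */
struct example_sum_desc {
	struct xdr_array2_desc desc;
	u32 sum;
};

static int example_sum_elem(struct xdr_array2_desc *desc, void *elem)
{
	struct example_sum_desc *sd =
		container_of(desc, struct example_sum_desc, desc);

	sd->sum += be32_to_cpup(elem);
	return 0;	/* non-zero aborts the walk */
}

static int example_sum_array(const struct xdr_buf *buf, unsigned int base,
			     u32 *sum)
{
	struct example_sum_desc sd = {
		.desc.elem_size		= 4,
		.desc.array_maxlen	= 1024,	/* arbitrary sanity cap */
		.desc.xcode		= example_sum_elem,
	};
	int err;

	err = xdr_decode_array2(buf, base, &sd.desc);
	if (!err)
		*sum = sd.sum;
	return err;
}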
2069 
2070 int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
2071             unsigned int len,
2072             int (*actor)(struct scatterlist *, void *), void *data)
2073 {
2074     int i, ret = 0;
2075     unsigned int page_len, thislen, page_offset;
2076     struct scatterlist      sg[1];
2077 
2078     sg_init_table(sg, 1);
2079 
2080     if (offset >= buf->head[0].iov_len) {
2081         offset -= buf->head[0].iov_len;
2082     } else {
2083         thislen = buf->head[0].iov_len - offset;
2084         if (thislen > len)
2085             thislen = len;
2086         sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
2087         ret = actor(sg, data);
2088         if (ret)
2089             goto out;
2090         offset = 0;
2091         len -= thislen;
2092     }
2093     if (len == 0)
2094         goto out;
2095 
2096     if (offset >= buf->page_len) {
2097         offset -= buf->page_len;
2098     } else {
2099         page_len = buf->page_len - offset;
2100         if (page_len > len)
2101             page_len = len;
2102         len -= page_len;
2103         page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
2104         i = (offset + buf->page_base) >> PAGE_SHIFT;
2105         thislen = PAGE_SIZE - page_offset;
2106         do {
2107             if (thislen > page_len)
2108                 thislen = page_len;
2109             sg_set_page(sg, buf->pages[i], thislen, page_offset);
2110             ret = actor(sg, data);
2111             if (ret)
2112                 goto out;
2113             page_len -= thislen;
2114             i++;
2115             page_offset = 0;
2116             thislen = PAGE_SIZE;
2117         } while (page_len != 0);
2118         offset = 0;
2119     }
2120     if (len == 0)
2121         goto out;
2122     if (offset < buf->tail[0].iov_len) {
2123         thislen = buf->tail[0].iov_len - offset;
2124         if (thislen > len)
2125             thislen = len;
2126         sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
2127         ret = actor(sg, data);
2128         len -= thislen;
2129     }
2130     if (len != 0)
2131         ret = -EINVAL;
2132 out:
2133     return ret;
2134 }
2135 EXPORT_SYMBOL_GPL(xdr_process_buf);
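
/*
 * Editor's note: usage sketch, not part of the original file. The
 * actor runs once per contiguous segment (head, each page fragment,
 * then tail) with a one-entry scatterlist; real callers typically
 * feed each segment to a crypto hash. This hypothetical actor only
 * totals the segment lengths.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	*(unsigned int *)data += sg->length;
	return 0;	/* non-zero would abort the walk */
}

static int example_measure(const struct xdr_buf *buf, unsigned int offset,
			   unsigned int len)
{
	unsigned int total = 0;
	int err;

	err = xdr_process_buf(buf, offset, len, example_count_actor, &total);
	return err ? err : (total == len ? 0 : -EIO);
}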
2136 
2137 /**
2138  * xdr_stream_decode_opaque - Decode variable length opaque
2139  * @xdr: pointer to xdr_stream
2140  * @ptr: location to store opaque data
2141  * @size: size of storage buffer @ptr
2142  *
2143  * Return values:
2144  *   On success, returns size of object stored in *@ptr
2145  *   %-EBADMSG on XDR buffer overflow
2146  *   %-EMSGSIZE on overflow of storage buffer @ptr
2147  */
2148 ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
2149 {
2150     ssize_t ret;
2151     void *p;
2152 
2153     ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
2154     if (ret <= 0)
2155         return ret;
2156     memcpy(ptr, p, ret);
2157     return ret;
2158 }
2159 EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
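
/*
 * Editor's note: usage sketch, not part of the original file. Decode
 * a variable-length opaque into a fixed on-stack buffer; per the
 * kernel-doc above, -EMSGSIZE means the on-the-wire object is larger
 * than the buffer and -EBADMSG means the stream itself is exhausted.
 */
static int example_decode_verifier(struct xdr_stream *xdr)
{
	u8 verf[16];	/* hypothetical fixed-size verifier */
	ssize_t len;

	len = xdr_stream_decode_opaque(xdr, verf, sizeof(verf));
	if (len < 0)
		return len;
	/* verf[0..len-1] now holds the object's bytes */
	return 0;
}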
2160 
2161 /**
2162  * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
2163  * @xdr: pointer to xdr_stream
2164  * @ptr: location to store pointer to opaque data
2165  * @maxlen: maximum acceptable object size
2166  * @gfp_flags: GFP mask to use
2167  *
2168  * Return values:
2169  *   On success, returns size of object stored in *@ptr
2170  *   %-EBADMSG on XDR buffer overflow
2171  *   %-EMSGSIZE if the size of the object would exceed @maxlen
2172  *   %-ENOMEM on memory allocation failure
2173  */
2174 ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
2175         size_t maxlen, gfp_t gfp_flags)
2176 {
2177     ssize_t ret;
2178     void *p;
2179 
2180     ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
2181     if (ret > 0) {
2182         *ptr = kmemdup(p, ret, gfp_flags);
2183         if (*ptr != NULL)
2184             return ret;
2185         ret = -ENOMEM;
2186     }
2187     *ptr = NULL;
2188     return ret;
2189 }
2190 EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
2191 
2192 /**
2193  * xdr_stream_decode_string - Decode variable length string
2194  * @xdr: pointer to xdr_stream
2195  * @str: location to store string
2196  * @size: size of storage buffer @str
2197  *
2198  * Return values:
2199  *   On success, returns length of NUL-terminated string stored in *@str
2200  *   %-EBADMSG on XDR buffer overflow
2201  *   %-EMSGSIZE on overflow of storage buffer @str
2202  */
2203 ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
2204 {
2205     ssize_t ret;
2206     void *p;
2207 
2208     ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
2209     if (ret > 0) {
2210         memcpy(str, p, ret);
2211         str[ret] = '\0';
2212         return strlen(str);
2213     }
2214     *str = '\0';
2215     return ret;
2216 }
2217 EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
2218 
2219 /**
2220  * xdr_stream_decode_string_dup - Decode and duplicate variable length string
2221  * @xdr: pointer to xdr_stream
2222  * @str: location to store pointer to string
2223  * @maxlen: maximum acceptable string length
2224  * @gfp_flags: GFP mask to use
2225  *
2226  * Return values:
2227  *   On success, returns length of NUL-terminated string stored in *@str
2228  *   %-EBADMSG on XDR buffer overflow
2229  *   %-EMSGSIZE if the size of the string would exceed @maxlen
2230  *   %-ENOMEM on memory allocation failure
2231  */
2232 ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
2233         size_t maxlen, gfp_t gfp_flags)
2234 {
2235     void *p;
2236     ssize_t ret;
2237 
2238     ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
2239     if (ret > 0) {
2240         char *s = kmemdup_nul(p, ret, gfp_flags);
2241         if (s != NULL) {
2242             *str = s;
2243             return strlen(s);
2244         }
2245         ret = -ENOMEM;
2246     }
2247     *str = NULL;
2248     return ret;
2249 }
2250 EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
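
/*
 * Editor's note: usage sketch, not part of the original file. The
 * _dup variant allocates the NUL-terminated result, so the caller
 * owns it and must kfree() it; the cap of 255 here is arbitrary.
 */
static int example_decode_name(struct xdr_stream *xdr)
{
	char *name;
	ssize_t len;

	len = xdr_stream_decode_string_dup(xdr, &name, 255, GFP_KERNEL);
	if (len < 0)
		return len;
	pr_debug("decoded name: %s\n", name);
	kfree(name);
	return 0;
}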