cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xdr.c (56492B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * linux/net/sunrpc/xdr.c
      4 *
      5 * Generic XDR support.
      6 *
      7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
      8 */
      9
     10#include <linux/module.h>
     11#include <linux/slab.h>
     12#include <linux/types.h>
     13#include <linux/string.h>
     14#include <linux/kernel.h>
     15#include <linux/pagemap.h>
     16#include <linux/errno.h>
     17#include <linux/sunrpc/xdr.h>
     18#include <linux/sunrpc/msg_prot.h>
     19#include <linux/bvec.h>
     20#include <trace/events/sunrpc.h>
     21
     22static void _copy_to_pages(struct page **, size_t, const char *, size_t);
     23
     24
     25/*
     26 * XDR functions for basic NFS types
     27 */
     28__be32 *
     29xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
     30{
     31	unsigned int	quadlen = XDR_QUADLEN(obj->len);
     32
     33	p[quadlen] = 0;		/* zero trailing bytes */
     34	*p++ = cpu_to_be32(obj->len);
     35	memcpy(p, obj->data, obj->len);
     36	return p + XDR_QUADLEN(obj->len);
     37}
     38EXPORT_SYMBOL_GPL(xdr_encode_netobj);
     39
     40__be32 *
     41xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
     42{
     43	unsigned int	len;
     44
     45	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
     46		return NULL;
     47	obj->len  = len;
     48	obj->data = (u8 *) p;
     49	return p + XDR_QUADLEN(len);
     50}
     51EXPORT_SYMBOL_GPL(xdr_decode_netobj);
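
/*
 * Usage sketch for the two netobj helpers above (illustrative, not part
 * of the original file; the function name and buffer size are
 * assumptions). A netobj is encoded as a 4-byte length followed by the
 * data rounded up to a 4-byte boundary, so 8 payload bytes need 3 words.
 */
static int example_netobj_roundtrip(void)
{
	u8 data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	struct xdr_netobj in = { .len = sizeof(data), .data = data };
	struct xdr_netobj out;
	__be32 wire[3];

	xdr_encode_netobj(wire, &in);	/* length word + payload + pad */
	if (xdr_decode_netobj(wire, &out) == NULL)
		return -EINVAL;		/* length exceeded XDR_MAX_NETOBJ */
	return out.len == in.len ? 0 : -EINVAL;
}
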
     52
     53/**
     54 * xdr_encode_opaque_fixed - Encode fixed length opaque data
     55 * @p: pointer to current position in XDR buffer.
     56 * @ptr: pointer to data to encode (or NULL)
     57 * @nbytes: size of data.
     58 *
     59 * Copy the array of data of length nbytes at ptr to the XDR buffer
     60 * at position p, then align to the next 32-bit boundary by padding
     61 * with zero bytes (see RFC1832).
     62 * Note: if ptr is NULL, only the padding is performed.
     63 *
     64 * Returns the updated current XDR buffer position
     65 *
     66 */
     67__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
     68{
     69	if (likely(nbytes != 0)) {
     70		unsigned int quadlen = XDR_QUADLEN(nbytes);
     71		unsigned int padding = (quadlen << 2) - nbytes;
     72
     73		if (ptr != NULL)
     74			memcpy(p, ptr, nbytes);
     75		if (padding != 0)
     76			memset((char *)p + nbytes, 0, padding);
     77		p += quadlen;
     78	}
     79	return p;
     80}
     81EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
     82
     83/**
     84 * xdr_encode_opaque - Encode variable length opaque data
     85 * @p: pointer to current position in XDR buffer.
     86 * @ptr: pointer to data to encode (or NULL)
     87 * @nbytes: size of data.
     88 *
     89 * Returns the updated current XDR buffer position
     90 */
     91__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
     92{
     93	*p++ = cpu_to_be32(nbytes);
     94	return xdr_encode_opaque_fixed(p, ptr, nbytes);
     95}
     96EXPORT_SYMBOL_GPL(xdr_encode_opaque);
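
/*
 * Wire-layout sketch for the opaque encoders above (illustrative, not
 * part of the original file): a 5-byte opaque costs one length word
 * plus XDR_QUADLEN(5) = 2 data words, i.e. 12 bytes in total, and the
 * final 3 bytes of the last word are zeroed by xdr_encode_opaque_fixed().
 */
static __be32 *example_encode_tag(__be32 *p)
{
	static const char tag[5] = "abcde";

	return xdr_encode_opaque(p, tag, sizeof(tag)); /* p advances 3 words */
}
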
     97
     98__be32 *
     99xdr_encode_string(__be32 *p, const char *string)
    100{
    101	return xdr_encode_array(p, string, strlen(string));
    102}
    103EXPORT_SYMBOL_GPL(xdr_encode_string);
    104
    105__be32 *
    106xdr_decode_string_inplace(__be32 *p, char **sp,
    107			  unsigned int *lenp, unsigned int maxlen)
    108{
    109	u32 len;
    110
    111	len = be32_to_cpu(*p++);
    112	if (len > maxlen)
    113		return NULL;
    114	*lenp = len;
    115	*sp = (char *) p;
    116	return p + XDR_QUADLEN(len);
    117}
    118EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
    119
    120/**
    121 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
    122 * @buf: XDR buffer where string resides
    123 * @len: length of string, in bytes
    124 *
    125 */
    126void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
    127{
    128	char *kaddr;
    129
    130	kaddr = kmap_atomic(buf->pages[0]);
    131	kaddr[buf->page_base + len] = '\0';
    132	kunmap_atomic(kaddr);
    133}
    134EXPORT_SYMBOL_GPL(xdr_terminate_string);
    135
    136size_t xdr_buf_pagecount(const struct xdr_buf *buf)
    137{
    138	if (!buf->page_len)
    139		return 0;
    140	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    141}
    142
    143int
    144xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
    145{
    146	size_t i, n = xdr_buf_pagecount(buf);
    147
    148	if (n != 0 && buf->bvec == NULL) {
    149		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
    150		if (!buf->bvec)
    151			return -ENOMEM;
    152		for (i = 0; i < n; i++) {
    153			buf->bvec[i].bv_page = buf->pages[i];
    154			buf->bvec[i].bv_len = PAGE_SIZE;
    155			buf->bvec[i].bv_offset = 0;
    156		}
    157	}
    158	return 0;
    159}
    160
    161void
    162xdr_free_bvec(struct xdr_buf *buf)
    163{
    164	kfree(buf->bvec);
    165	buf->bvec = NULL;
    166}
    167
    168/**
    169 * xdr_inline_pages - Prepare receive buffer for a large reply
    170 * @xdr: xdr_buf into which reply will be placed
    171 * @offset: expected offset where data payload will start, in bytes
    172 * @pages: vector of struct page pointers
    173 * @base: offset in first page where receive should start, in bytes
    174 * @len: expected size of the upper layer data payload, in bytes
    175 *
    176 */
    177void
    178xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
    179		 struct page **pages, unsigned int base, unsigned int len)
    180{
    181	struct kvec *head = xdr->head;
    182	struct kvec *tail = xdr->tail;
    183	char *buf = (char *)head->iov_base;
    184	unsigned int buflen = head->iov_len;
    185
    186	head->iov_len  = offset;
    187
    188	xdr->pages = pages;
    189	xdr->page_base = base;
    190	xdr->page_len = len;
    191
    192	tail->iov_base = buf + offset;
    193	tail->iov_len = buflen - offset;
    194	xdr->buflen += len;
    195}
    196EXPORT_SYMBOL_GPL(xdr_inline_pages);
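
/*
 * Receive-side sketch for xdr_inline_pages() (hypothetical caller, not
 * part of the original file): prepare an NFS READ-style reply so that
 * the fixed header decodes from head[0], the payload lands directly in
 * the caller's pages, and anything past the payload is parked in
 * tail[0]. @replen is the expected header size, @count the maximum
 * payload size.
 */
static void example_prepare_read_reply(struct xdr_buf *rcvbuf,
				       unsigned int replen,
				       struct page **pages,
				       unsigned int count)
{
	xdr_inline_pages(rcvbuf, replen, pages, 0, count);
}
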
    197
    198/*
    199 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
    200 */
    201
    202/**
    203 * _shift_data_left_pages
    204 * @pages: vector of pages containing both the source and dest memory area.
    205 * @pgto_base: page vector address of destination
    206 * @pgfrom_base: page vector address of source
    207 * @len: number of bytes to copy
    208 *
    209 * Note: the addresses pgto_base and pgfrom_base are both calculated in
    210 *       the same way:
    211 *            if a memory area starts at byte 'base' in page 'pages[i]',
     212 *            then its address is given as (i << PAGE_SHIFT) + base
     213 * Also note: pgto_base must be < pgfrom_base, but the memory areas
    214 * 	they point to may overlap.
    215 */
    216static void
    217_shift_data_left_pages(struct page **pages, size_t pgto_base,
    218			size_t pgfrom_base, size_t len)
    219{
    220	struct page **pgfrom, **pgto;
    221	char *vfrom, *vto;
    222	size_t copy;
    223
    224	BUG_ON(pgfrom_base <= pgto_base);
    225
    226	if (!len)
    227		return;
    228
    229	pgto = pages + (pgto_base >> PAGE_SHIFT);
    230	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
    231
    232	pgto_base &= ~PAGE_MASK;
    233	pgfrom_base &= ~PAGE_MASK;
    234
    235	do {
    236		if (pgto_base >= PAGE_SIZE) {
    237			pgto_base = 0;
    238			pgto++;
    239		}
     240		if (pgfrom_base >= PAGE_SIZE) {
    241			pgfrom_base = 0;
    242			pgfrom++;
    243		}
    244
    245		copy = len;
    246		if (copy > (PAGE_SIZE - pgto_base))
    247			copy = PAGE_SIZE - pgto_base;
    248		if (copy > (PAGE_SIZE - pgfrom_base))
    249			copy = PAGE_SIZE - pgfrom_base;
    250
    251		vto = kmap_atomic(*pgto);
    252		if (*pgto != *pgfrom) {
    253			vfrom = kmap_atomic(*pgfrom);
    254			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
    255			kunmap_atomic(vfrom);
    256		} else
    257			memmove(vto + pgto_base, vto + pgfrom_base, copy);
    258		flush_dcache_page(*pgto);
    259		kunmap_atomic(vto);
    260
    261		pgto_base += copy;
    262		pgfrom_base += copy;
    263
    264	} while ((len -= copy) != 0);
    265}
    266
    267/**
    268 * _shift_data_right_pages
    269 * @pages: vector of pages containing both the source and dest memory area.
    270 * @pgto_base: page vector address of destination
    271 * @pgfrom_base: page vector address of source
    272 * @len: number of bytes to copy
    273 *
    274 * Note: the addresses pgto_base and pgfrom_base are both calculated in
    275 *       the same way:
    276 *            if a memory area starts at byte 'base' in page 'pages[i]',
    277 *            then its address is given as (i << PAGE_SHIFT) + base
    278 * Also note: pgfrom_base must be < pgto_base, but the memory areas
    279 * 	they point to may overlap.
    280 */
    281static void
    282_shift_data_right_pages(struct page **pages, size_t pgto_base,
    283		size_t pgfrom_base, size_t len)
    284{
    285	struct page **pgfrom, **pgto;
    286	char *vfrom, *vto;
    287	size_t copy;
    288
    289	BUG_ON(pgto_base <= pgfrom_base);
    290
    291	if (!len)
    292		return;
    293
    294	pgto_base += len;
    295	pgfrom_base += len;
    296
    297	pgto = pages + (pgto_base >> PAGE_SHIFT);
    298	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
    299
    300	pgto_base &= ~PAGE_MASK;
    301	pgfrom_base &= ~PAGE_MASK;
    302
    303	do {
    304		/* Are any pointers crossing a page boundary? */
    305		if (pgto_base == 0) {
    306			pgto_base = PAGE_SIZE;
    307			pgto--;
    308		}
    309		if (pgfrom_base == 0) {
    310			pgfrom_base = PAGE_SIZE;
    311			pgfrom--;
    312		}
    313
    314		copy = len;
    315		if (copy > pgto_base)
    316			copy = pgto_base;
    317		if (copy > pgfrom_base)
    318			copy = pgfrom_base;
    319		pgto_base -= copy;
    320		pgfrom_base -= copy;
    321
    322		vto = kmap_atomic(*pgto);
    323		if (*pgto != *pgfrom) {
    324			vfrom = kmap_atomic(*pgfrom);
    325			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
    326			kunmap_atomic(vfrom);
    327		} else
    328			memmove(vto + pgto_base, vto + pgfrom_base, copy);
    329		flush_dcache_page(*pgto);
    330		kunmap_atomic(vto);
    331
    332	} while ((len -= copy) != 0);
    333}
    334
    335/**
    336 * _copy_to_pages
    337 * @pages: array of pages
    338 * @pgbase: page vector address of destination
    339 * @p: pointer to source data
    340 * @len: length
    341 *
    342 * Copies data from an arbitrary memory location into an array of pages
    343 * The copy is assumed to be non-overlapping.
    344 */
    345static void
    346_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
    347{
    348	struct page **pgto;
    349	char *vto;
    350	size_t copy;
    351
    352	if (!len)
    353		return;
    354
    355	pgto = pages + (pgbase >> PAGE_SHIFT);
    356	pgbase &= ~PAGE_MASK;
    357
    358	for (;;) {
    359		copy = PAGE_SIZE - pgbase;
    360		if (copy > len)
    361			copy = len;
    362
    363		vto = kmap_atomic(*pgto);
    364		memcpy(vto + pgbase, p, copy);
    365		kunmap_atomic(vto);
    366
    367		len -= copy;
    368		if (len == 0)
    369			break;
    370
    371		pgbase += copy;
    372		if (pgbase == PAGE_SIZE) {
    373			flush_dcache_page(*pgto);
    374			pgbase = 0;
    375			pgto++;
    376		}
    377		p += copy;
    378	}
    379	flush_dcache_page(*pgto);
    380}
    381
    382/**
    383 * _copy_from_pages
    384 * @p: pointer to destination
    385 * @pages: array of pages
    386 * @pgbase: offset of source data
    387 * @len: length
    388 *
    389 * Copies data into an arbitrary memory location from an array of pages
    390 * The copy is assumed to be non-overlapping.
    391 */
    392void
    393_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
    394{
    395	struct page **pgfrom;
    396	char *vfrom;
    397	size_t copy;
    398
    399	if (!len)
    400		return;
    401
    402	pgfrom = pages + (pgbase >> PAGE_SHIFT);
    403	pgbase &= ~PAGE_MASK;
    404
    405	do {
    406		copy = PAGE_SIZE - pgbase;
    407		if (copy > len)
    408			copy = len;
    409
    410		vfrom = kmap_atomic(*pgfrom);
    411		memcpy(p, vfrom + pgbase, copy);
    412		kunmap_atomic(vfrom);
    413
    414		pgbase += copy;
    415		if (pgbase == PAGE_SIZE) {
    416			pgbase = 0;
    417			pgfrom++;
    418		}
    419		p += copy;
    420
    421	} while ((len -= copy) != 0);
    422}
    423EXPORT_SYMBOL_GPL(_copy_from_pages);
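
/*
 * Addressing sketch for the page helpers above (illustrative, not part
 * of the original file): a byte at offset 'base' in pages[i] has
 * page-vector address (i << PAGE_SHIFT) + base, so copying 16 bytes
 * that start 8 bytes into the third page looks like this:
 */
static void example_copy_from_third_page(char *dst, struct page **pages)
{
	_copy_from_pages(dst, pages, (2 << PAGE_SHIFT) + 8, 16);
}
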
    424
    425static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
    426			     unsigned int len)
    427{
    428	if (base >= iov->iov_len)
    429		return;
    430	if (len > iov->iov_len - base)
    431		len = iov->iov_len - base;
    432	memset(iov->iov_base + base, 0, len);
    433}
    434
    435/**
    436 * xdr_buf_pages_zero
    437 * @buf: xdr_buf
    438 * @pgbase: beginning offset
    439 * @len: length
    440 */
    441static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
    442			       unsigned int len)
    443{
    444	struct page **pages = buf->pages;
    445	struct page **page;
    446	char *vpage;
    447	unsigned int zero;
    448
    449	if (!len)
    450		return;
    451	if (pgbase >= buf->page_len) {
    452		xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
    453		return;
    454	}
    455	if (pgbase + len > buf->page_len) {
    456		xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
    457		len = buf->page_len - pgbase;
    458	}
    459
    460	pgbase += buf->page_base;
    461
    462	page = pages + (pgbase >> PAGE_SHIFT);
    463	pgbase &= ~PAGE_MASK;
    464
    465	do {
    466		zero = PAGE_SIZE - pgbase;
    467		if (zero > len)
    468			zero = len;
    469
    470		vpage = kmap_atomic(*page);
    471		memset(vpage + pgbase, 0, zero);
    472		kunmap_atomic(vpage);
    473
    474		flush_dcache_page(*page);
    475		pgbase = 0;
    476		page++;
    477
    478	} while ((len -= zero) != 0);
    479}
    480
    481static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
    482					      unsigned int buflen, gfp_t gfp)
    483{
    484	unsigned int i, npages, pagelen;
    485
    486	if (!(buf->flags & XDRBUF_SPARSE_PAGES))
    487		return buflen;
    488	if (buflen <= buf->head->iov_len)
    489		return buflen;
    490	pagelen = buflen - buf->head->iov_len;
    491	if (pagelen > buf->page_len)
    492		pagelen = buf->page_len;
    493	npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
    494	for (i = 0; i < npages; i++) {
     495		if (buf->pages[i])
    496			continue;
    497		buf->pages[i] = alloc_page(gfp);
    498		if (likely(buf->pages[i]))
    499			continue;
    500		buflen -= pagelen;
    501		pagelen = i << PAGE_SHIFT;
    502		if (pagelen > buf->page_base)
    503			buflen += pagelen - buf->page_base;
    504		break;
    505	}
    506	return buflen;
    507}
    508
    509static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
    510{
    511	struct kvec *head = buf->head;
    512	struct kvec *tail = buf->tail;
    513	unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
    514	unsigned int free_space, newlen;
    515
    516	if (sum > buf->len) {
    517		free_space = min_t(unsigned int, sum - buf->len, len);
    518		newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
    519						   GFP_KERNEL);
    520		free_space = newlen - buf->len;
    521		buf->len = newlen;
    522		len -= free_space;
    523		if (!len)
    524			return;
    525	}
    526
    527	if (buf->buflen > sum) {
    528		/* Expand the tail buffer */
    529		free_space = min_t(unsigned int, buf->buflen - sum, len);
    530		tail->iov_len += free_space;
    531		buf->len += free_space;
    532	}
    533}
    534
    535static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
    536				    unsigned int base, unsigned int len,
    537				    unsigned int shift)
    538{
    539	const struct kvec *tail = buf->tail;
    540	unsigned int to = base + shift;
    541
    542	if (to >= tail->iov_len)
    543		return;
    544	if (len + to > tail->iov_len)
    545		len = tail->iov_len - to;
    546	memmove(tail->iov_base + to, tail->iov_base + base, len);
    547}
    548
    549static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
    550				     unsigned int base, unsigned int len,
    551				     unsigned int shift)
    552{
    553	const struct kvec *tail = buf->tail;
    554	unsigned int to = base + shift;
    555	unsigned int pglen = 0;
    556	unsigned int talen = 0, tato = 0;
    557
    558	if (base >= buf->page_len)
    559		return;
    560	if (len > buf->page_len - base)
    561		len = buf->page_len - base;
    562	if (to >= buf->page_len) {
    563		tato = to - buf->page_len;
    564		if (tail->iov_len >= len + tato)
    565			talen = len;
    566		else if (tail->iov_len > tato)
    567			talen = tail->iov_len - tato;
    568	} else if (len + to >= buf->page_len) {
    569		pglen = buf->page_len - to;
    570		talen = len - pglen;
    571		if (talen > tail->iov_len)
    572			talen = tail->iov_len;
    573	} else
    574		pglen = len;
    575
    576	_copy_from_pages(tail->iov_base + tato, buf->pages,
    577			 buf->page_base + base + pglen, talen);
    578	_shift_data_right_pages(buf->pages, buf->page_base + to,
    579				buf->page_base + base, pglen);
    580}
    581
    582static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
    583				    unsigned int base, unsigned int len,
    584				    unsigned int shift)
    585{
    586	const struct kvec *head = buf->head;
    587	const struct kvec *tail = buf->tail;
    588	unsigned int to = base + shift;
    589	unsigned int pglen = 0, pgto = 0;
    590	unsigned int talen = 0, tato = 0;
    591
    592	if (base >= head->iov_len)
    593		return;
    594	if (len > head->iov_len - base)
    595		len = head->iov_len - base;
    596	if (to >= buf->page_len + head->iov_len) {
    597		tato = to - buf->page_len - head->iov_len;
    598		talen = len;
    599	} else if (to >= head->iov_len) {
    600		pgto = to - head->iov_len;
    601		pglen = len;
    602		if (pgto + pglen > buf->page_len) {
    603			talen = pgto + pglen - buf->page_len;
    604			pglen -= talen;
    605		}
    606	} else {
    607		pglen = len - to;
    608		if (pglen > buf->page_len) {
    609			talen = pglen - buf->page_len;
    610			pglen = buf->page_len;
    611		}
    612	}
    613
    614	len -= talen;
    615	base += len;
    616	if (talen + tato > tail->iov_len)
    617		talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
    618	memcpy(tail->iov_base + tato, head->iov_base + base, talen);
    619
    620	len -= pglen;
    621	base -= pglen;
    622	_copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
    623		       pglen);
    624
    625	base -= len;
    626	memmove(head->iov_base + to, head->iov_base + base, len);
    627}
    628
    629static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
    630				     unsigned int base, unsigned int len,
    631				     unsigned int shift)
    632{
    633	const struct kvec *tail = buf->tail;
    634
    635	if (base >= tail->iov_len || !shift || !len)
    636		return;
    637	xdr_buf_tail_copy_right(buf, base, len, shift);
    638}
    639
    640static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
    641				      unsigned int base, unsigned int len,
    642				      unsigned int shift)
    643{
    644	if (!shift || !len)
    645		return;
    646	if (base >= buf->page_len) {
    647		xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
    648		return;
    649	}
    650	if (base + len > buf->page_len)
    651		xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
    652					 shift);
    653	xdr_buf_pages_copy_right(buf, base, len, shift);
    654}
    655
    656static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
    657				     unsigned int base, unsigned int len,
    658				     unsigned int shift)
    659{
    660	const struct kvec *head = buf->head;
    661
    662	if (!shift)
    663		return;
    664	if (base >= head->iov_len) {
     665		xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
    666					  shift);
    667		return;
    668	}
    669	if (base + len > head->iov_len)
    670		xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
    671					  shift);
    672	xdr_buf_head_copy_right(buf, base, len, shift);
    673}
    674
    675static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
    676				   unsigned int len, unsigned int shift)
    677{
    678	const struct kvec *tail = buf->tail;
    679
    680	if (base >= tail->iov_len)
    681		return;
    682	if (len > tail->iov_len - base)
    683		len = tail->iov_len - base;
    684	/* Shift data into head */
    685	if (shift > buf->page_len + base) {
    686		const struct kvec *head = buf->head;
    687		unsigned int hdto =
    688			head->iov_len + buf->page_len + base - shift;
    689		unsigned int hdlen = len;
    690
    691		if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
    692			      "SUNRPC: Misaligned data.\n"))
    693			return;
    694		if (hdto + hdlen > head->iov_len)
    695			hdlen = head->iov_len - hdto;
    696		memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
    697		base += hdlen;
    698		len -= hdlen;
    699		if (!len)
    700			return;
    701	}
    702	/* Shift data into pages */
    703	if (shift > base) {
    704		unsigned int pgto = buf->page_len + base - shift;
    705		unsigned int pglen = len;
    706
    707		if (pgto + pglen > buf->page_len)
    708			pglen = buf->page_len - pgto;
    709		_copy_to_pages(buf->pages, buf->page_base + pgto,
    710			       tail->iov_base + base, pglen);
    711		base += pglen;
    712		len -= pglen;
    713		if (!len)
    714			return;
    715	}
    716	memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
    717}
    718
    719static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
    720				    unsigned int base, unsigned int len,
    721				    unsigned int shift)
    722{
    723	unsigned int pgto;
    724
    725	if (base >= buf->page_len)
    726		return;
    727	if (len > buf->page_len - base)
    728		len = buf->page_len - base;
    729	/* Shift data into head */
    730	if (shift > base) {
    731		const struct kvec *head = buf->head;
    732		unsigned int hdto = head->iov_len + base - shift;
    733		unsigned int hdlen = len;
    734
    735		if (WARN_ONCE(shift > head->iov_len + base,
    736			      "SUNRPC: Misaligned data.\n"))
    737			return;
    738		if (hdto + hdlen > head->iov_len)
    739			hdlen = head->iov_len - hdto;
    740		_copy_from_pages(head->iov_base + hdto, buf->pages,
    741				 buf->page_base + base, hdlen);
    742		base += hdlen;
    743		len -= hdlen;
    744		if (!len)
    745			return;
    746	}
    747	pgto = base - shift;
    748	_shift_data_left_pages(buf->pages, buf->page_base + pgto,
    749			       buf->page_base + base, len);
    750}
    751
    752static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
    753				    unsigned int base, unsigned int len,
    754				    unsigned int shift)
    755{
    756	if (!shift || !len)
    757		return;
    758	xdr_buf_tail_copy_left(buf, base, len, shift);
    759}
    760
    761static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
    762				     unsigned int base, unsigned int len,
    763				     unsigned int shift)
    764{
    765	if (!shift || !len)
    766		return;
    767	if (base >= buf->page_len) {
    768		xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
    769		return;
    770	}
    771	xdr_buf_pages_copy_left(buf, base, len, shift);
    772	len += base;
    773	if (len <= buf->page_len)
    774		return;
    775	xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
    776}
    777
    778/**
    779 * xdr_shrink_bufhead
    780 * @buf: xdr_buf
    781 * @len: new length of buf->head[0]
    782 *
    783 * Shrinks XDR buffer's header kvec buf->head[0], setting it to
    784 * 'len' bytes. The extra data is not lost, but is instead
    785 * moved into the inlined pages and/or the tail.
    786 */
    787static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
    788{
    789	struct kvec *head = buf->head;
    790	unsigned int shift, buflen = max(buf->len, len);
    791
    792	WARN_ON_ONCE(len > head->iov_len);
    793	if (head->iov_len > buflen) {
    794		buf->buflen -= head->iov_len - buflen;
    795		head->iov_len = buflen;
    796	}
    797	if (len >= head->iov_len)
    798		return 0;
    799	shift = head->iov_len - len;
    800	xdr_buf_try_expand(buf, shift);
    801	xdr_buf_head_shift_right(buf, len, buflen - len, shift);
    802	head->iov_len = len;
    803	buf->buflen -= shift;
    804	buf->len -= shift;
    805	return shift;
    806}
    807
    808/**
    809 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
    810 * @buf: xdr_buf
    811 * @len: new page buffer length
    812 *
    813 * The extra data is not lost, but is instead moved into buf->tail.
    814 * Returns the actual number of bytes moved.
    815 */
    816static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
    817{
    818	unsigned int shift, buflen = buf->len - buf->head->iov_len;
    819
    820	WARN_ON_ONCE(len > buf->page_len);
    821	if (buf->head->iov_len >= buf->len || len > buflen)
    822		buflen = len;
    823	if (buf->page_len > buflen) {
    824		buf->buflen -= buf->page_len - buflen;
    825		buf->page_len = buflen;
    826	}
    827	if (len >= buf->page_len)
    828		return 0;
    829	shift = buf->page_len - len;
    830	xdr_buf_try_expand(buf, shift);
    831	xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
    832	buf->page_len = len;
    833	buf->len -= shift;
    834	buf->buflen -= shift;
    835	return shift;
    836}
    837
    838void
    839xdr_shift_buf(struct xdr_buf *buf, size_t len)
    840{
    841	xdr_shrink_bufhead(buf, buf->head->iov_len - len);
    842}
    843EXPORT_SYMBOL_GPL(xdr_shift_buf);
    844
    845/**
    846 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
    847 * @xdr: pointer to struct xdr_stream
    848 */
    849unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
    850{
    851	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
    852}
    853EXPORT_SYMBOL_GPL(xdr_stream_pos);
    854
    855static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
    856{
    857	unsigned int blen = xdr->buf->len;
    858
    859	xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
    860}
    861
    862static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
    863{
    864	xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
    865}
    866
    867/**
    868 * xdr_page_pos - Return the current offset from the start of the xdr pages
    869 * @xdr: pointer to struct xdr_stream
    870 */
    871unsigned int xdr_page_pos(const struct xdr_stream *xdr)
    872{
    873	unsigned int pos = xdr_stream_pos(xdr);
    874
    875	WARN_ON(pos < xdr->buf->head[0].iov_len);
    876	return pos - xdr->buf->head[0].iov_len;
    877}
    878EXPORT_SYMBOL_GPL(xdr_page_pos);
    879
    880/**
    881 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
    882 * @xdr: pointer to xdr_stream struct
    883 * @buf: pointer to XDR buffer in which to encode data
    884 * @p: current pointer inside XDR buffer
    885 * @rqst: pointer to controlling rpc_rqst, for debugging
    886 *
    887 * Note: at the moment the RPC client only passes the length of our
    888 *	 scratch buffer in the xdr_buf's header kvec. Previously this
    889 *	 meant we needed to call xdr_adjust_iovec() after encoding the
    890 *	 data. With the new scheme, the xdr_stream manages the details
    891 *	 of the buffer length, and takes care of adjusting the kvec
    892 *	 length for us.
    893 */
    894void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
    895		     struct rpc_rqst *rqst)
    896{
    897	struct kvec *iov = buf->head;
    898	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
    899
    900	xdr_reset_scratch_buffer(xdr);
    901	BUG_ON(scratch_len < 0);
    902	xdr->buf = buf;
    903	xdr->iov = iov;
    904	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
    905	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
    906	BUG_ON(iov->iov_len > scratch_len);
    907
    908	if (p != xdr->p && p != NULL) {
    909		size_t len;
    910
    911		BUG_ON(p < xdr->p || p > xdr->end);
    912		len = (char *)p - (char *)xdr->p;
    913		xdr->p = p;
    914		buf->len += len;
    915		iov->iov_len += len;
    916	}
    917	xdr->rqst = rqst;
    918}
    919EXPORT_SYMBOL_GPL(xdr_init_encode);
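
/*
 * Encode-stream setup sketch (hypothetical helper, not part of the
 * original file): wrap a caller-owned scratch area in a one-kvec
 * xdr_buf. head[0].iov_len and buf->len start at zero, so xdr->p
 * begins at the start of the scratch area and buflen bounds the encode.
 */
static void example_start_encode(struct xdr_stream *xdr,
				 struct xdr_buf *buf,
				 void *mem, size_t memlen)
{
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = mem;
	buf->buflen = memlen;
	xdr_init_encode(xdr, buf, NULL, NULL);
}
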
    920
    921/**
    922 * __xdr_commit_encode - Ensure all data is written to buffer
    923 * @xdr: pointer to xdr_stream
    924 *
    925 * We handle encoding across page boundaries by giving the caller a
    926 * temporary location to write to, then later copying the data into
    927 * place; xdr_commit_encode does that copying.
    928 *
    929 * Normally the caller doesn't need to call this directly, as the
    930 * following xdr_reserve_space will do it.  But an explicit call may be
    931 * required at the end of encoding, or any other time when the xdr_buf
    932 * data might be read.
    933 */
    934void __xdr_commit_encode(struct xdr_stream *xdr)
    935{
    936	size_t shift = xdr->scratch.iov_len;
    937	void *page;
    938
    939	page = page_address(*xdr->page_ptr);
    940	memcpy(xdr->scratch.iov_base, page, shift);
    941	memmove(page, page + shift, (void *)xdr->p - page);
    942	xdr_reset_scratch_buffer(xdr);
    943}
    944EXPORT_SYMBOL_GPL(__xdr_commit_encode);
    945
    946/*
    947 * The buffer space to be reserved crosses the boundary between
    948 * xdr->buf->head and xdr->buf->pages, or between two pages
    949 * in xdr->buf->pages.
    950 */
    951static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
    952						   size_t nbytes)
    953{
    954	int space_left;
    955	int frag1bytes, frag2bytes;
    956	void *p;
    957
    958	if (nbytes > PAGE_SIZE)
    959		goto out_overflow; /* Bigger buffers require special handling */
    960	if (xdr->buf->len + nbytes > xdr->buf->buflen)
    961		goto out_overflow; /* Sorry, we're totally out of space */
    962	frag1bytes = (xdr->end - xdr->p) << 2;
    963	frag2bytes = nbytes - frag1bytes;
    964	if (xdr->iov)
    965		xdr->iov->iov_len += frag1bytes;
    966	else
    967		xdr->buf->page_len += frag1bytes;
    968	xdr->page_ptr++;
    969	xdr->iov = NULL;
    970
    971	/*
    972	 * If the last encode didn't end exactly on a page boundary, the
    973	 * next one will straddle boundaries.  Encode into the next
    974	 * page, then copy it back later in xdr_commit_encode.  We use
    975	 * the "scratch" iov to track any temporarily unused fragment of
    976	 * space at the end of the previous buffer:
    977	 */
    978	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
    979
    980	/*
    981	 * xdr->p is where the next encode will start after
    982	 * xdr_commit_encode() has shifted this one back:
    983	 */
    984	p = page_address(*xdr->page_ptr);
    985	xdr->p = p + frag2bytes;
    986	space_left = xdr->buf->buflen - xdr->buf->len;
    987	if (space_left - frag1bytes >= PAGE_SIZE)
    988		xdr->end = p + PAGE_SIZE;
    989	else
    990		xdr->end = p + space_left - frag1bytes;
    991
    992	xdr->buf->page_len += frag2bytes;
    993	xdr->buf->len += nbytes;
    994	return p;
    995out_overflow:
    996	trace_rpc_xdr_overflow(xdr, nbytes);
    997	return NULL;
    998}
    999
   1000/**
   1001 * xdr_reserve_space - Reserve buffer space for sending
   1002 * @xdr: pointer to xdr_stream
   1003 * @nbytes: number of bytes to reserve
   1004 *
   1005 * Checks that we have enough buffer space to encode 'nbytes' more
   1006 * bytes of data. If so, update the total xdr_buf length, and
   1007 * adjust the length of the current kvec.
   1008 */
   1009__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
   1010{
   1011	__be32 *p = xdr->p;
   1012	__be32 *q;
   1013
   1014	xdr_commit_encode(xdr);
   1015	/* align nbytes on the next 32-bit boundary */
   1016	nbytes += 3;
   1017	nbytes &= ~3;
   1018	q = p + (nbytes >> 2);
   1019	if (unlikely(q > xdr->end || q < p))
   1020		return xdr_get_next_encode_buffer(xdr, nbytes);
   1021	xdr->p = q;
   1022	if (xdr->iov)
   1023		xdr->iov->iov_len += nbytes;
   1024	else
   1025		xdr->buf->page_len += nbytes;
   1026	xdr->buf->len += nbytes;
   1027	return p;
   1028}
   1029EXPORT_SYMBOL_GPL(xdr_reserve_space);
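
/*
 * Encode sketch for xdr_reserve_space() (hypothetical item format, not
 * part of the original file): the reservation is rounded up to a 4-byte
 * multiple, so asking for 6 bytes advances the stream by 8. Here one
 * u32 type tag is followed by a variable-length opaque, reserved in a
 * single call so the returned pointer stays valid for both.
 */
static int example_encode_item(struct xdr_stream *xdr, u32 type,
			       const void *blob, unsigned int bloblen)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + 4 + xdr_align_size(bloblen));
	if (!p)
		return -EMSGSIZE;
	*p++ = cpu_to_be32(type);
	xdr_encode_opaque(p, blob, bloblen);
	return 0;
}
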
   1030
   1031
   1032/**
   1033 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
   1034 * @xdr: pointer to xdr_stream
   1035 * @vec: pointer to a kvec array
   1036 * @nbytes: number of bytes to reserve
   1037 *
   1038 * Reserves enough buffer space to encode 'nbytes' of data and stores the
   1039 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
   1040 * determined based on the number of bytes remaining in the current page to
   1041 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
   1042 */
   1043int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
   1044{
   1045	int thislen;
   1046	int v = 0;
   1047	__be32 *p;
   1048
   1049	/*
   1050	 * svcrdma requires every READ payload to start somewhere
   1051	 * in xdr->pages.
   1052	 */
   1053	if (xdr->iov == xdr->buf->head) {
   1054		xdr->iov = NULL;
   1055		xdr->end = xdr->p;
   1056	}
   1057
   1058	while (nbytes) {
   1059		thislen = xdr->buf->page_len % PAGE_SIZE;
   1060		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
   1061
   1062		p = xdr_reserve_space(xdr, thislen);
   1063		if (!p)
   1064			return -EIO;
   1065
   1066		vec[v].iov_base = p;
   1067		vec[v].iov_len = thislen;
   1068		v++;
   1069		nbytes -= thislen;
   1070	}
   1071
   1072	return v;
   1073}
   1074EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
   1075
   1076/**
   1077 * xdr_truncate_encode - truncate an encode buffer
   1078 * @xdr: pointer to xdr_stream
   1079 * @len: new length of buffer
   1080 *
   1081 * Truncates the xdr stream, so that xdr->buf->len == len,
   1082 * and xdr->p points at offset len from the start of the buffer, and
   1083 * head, tail, and page lengths are adjusted to correspond.
   1084 *
   1085 * If this means moving xdr->p to a different buffer, we assume that
   1086 * the end pointer should be set to the end of the current page,
   1087 * except in the case of the head buffer when we assume the head
   1088 * buffer's current length represents the end of the available buffer.
   1089 *
   1090 * This is *not* safe to use on a buffer that already has inlined page
   1091 * cache pages (as in a zero-copy server read reply), except for the
   1092 * simple case of truncating from one position in the tail to another.
   1093 *
   1094 */
   1095void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
   1096{
   1097	struct xdr_buf *buf = xdr->buf;
   1098	struct kvec *head = buf->head;
   1099	struct kvec *tail = buf->tail;
   1100	int fraglen;
   1101	int new;
   1102
   1103	if (len > buf->len) {
   1104		WARN_ON_ONCE(1);
   1105		return;
   1106	}
   1107	xdr_commit_encode(xdr);
   1108
   1109	fraglen = min_t(int, buf->len - len, tail->iov_len);
   1110	tail->iov_len -= fraglen;
   1111	buf->len -= fraglen;
   1112	if (tail->iov_len) {
   1113		xdr->p = tail->iov_base + tail->iov_len;
   1114		WARN_ON_ONCE(!xdr->end);
   1115		WARN_ON_ONCE(!xdr->iov);
   1116		return;
   1117	}
   1118	WARN_ON_ONCE(fraglen);
   1119	fraglen = min_t(int, buf->len - len, buf->page_len);
   1120	buf->page_len -= fraglen;
   1121	buf->len -= fraglen;
   1122
   1123	new = buf->page_base + buf->page_len;
   1124
   1125	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
   1126
   1127	if (buf->page_len) {
   1128		xdr->p = page_address(*xdr->page_ptr);
   1129		xdr->end = (void *)xdr->p + PAGE_SIZE;
   1130		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
   1131		WARN_ON_ONCE(xdr->iov);
   1132		return;
   1133	}
   1134	if (fraglen)
   1135		xdr->end = head->iov_base + head->iov_len;
   1136	/* (otherwise assume xdr->end is already set) */
   1137	xdr->page_ptr--;
   1138	head->iov_len = len;
   1139	buf->len = len;
   1140	xdr->p = head->iov_base + head->iov_len;
   1141	xdr->iov = buf->head;
   1142}
   1143EXPORT_SYMBOL(xdr_truncate_encode);
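
/*
 * Rollback sketch for xdr_truncate_encode(), in the style of nfsd's
 * entry encoders (hypothetical, not part of the original file):
 * remember the stream length, attempt an encode step (here the
 * example_encode_item() sketch from above), and truncate back to the
 * saved length on failure so no partial entry leaks out.
 */
static int example_encode_checked(struct xdr_stream *xdr)
{
	unsigned int start = xdr->buf->len;
	int err;

	err = example_encode_item(xdr, 1, "xyz", 3);
	if (err)
		xdr_truncate_encode(xdr, start);	/* drop partial entry */
	return err;
}
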
   1144
   1145/**
   1146 * xdr_restrict_buflen - decrease available buffer space
   1147 * @xdr: pointer to xdr_stream
   1148 * @newbuflen: new maximum number of bytes available
   1149 *
   1150 * Adjust our idea of how much space is available in the buffer.
   1151 * If we've already used too much space in the buffer, returns -1.
   1152 * If the available space is already smaller than newbuflen, returns 0
   1153 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
   1154 * and ensures xdr->end is set at most offset newbuflen from the start
   1155 * of the buffer.
   1156 */
   1157int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
   1158{
   1159	struct xdr_buf *buf = xdr->buf;
   1160	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
   1161	int end_offset = buf->len + left_in_this_buf;
   1162
   1163	if (newbuflen < 0 || newbuflen < buf->len)
   1164		return -1;
   1165	if (newbuflen > buf->buflen)
   1166		return 0;
   1167	if (newbuflen < end_offset)
   1168		xdr->end = (void *)xdr->end + newbuflen - end_offset;
   1169	buf->buflen = newbuflen;
   1170	return 0;
   1171}
   1172EXPORT_SYMBOL(xdr_restrict_buflen);
   1173
   1174/**
   1175 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
   1176 * @xdr: pointer to xdr_stream
   1177 * @pages: list of pages
   1178 * @base: offset of first byte
   1179 * @len: length of data in bytes
   1180 *
   1181 */
   1182void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
   1183		 unsigned int len)
   1184{
   1185	struct xdr_buf *buf = xdr->buf;
   1186	struct kvec *iov = buf->tail;
   1187	buf->pages = pages;
   1188	buf->page_base = base;
   1189	buf->page_len = len;
   1190
   1191	iov->iov_base = (char *)xdr->p;
   1192	iov->iov_len  = 0;
   1193	xdr->iov = iov;
   1194
   1195	if (len & 3) {
   1196		unsigned int pad = 4 - (len & 3);
   1197
   1198		BUG_ON(xdr->p >= xdr->end);
   1199		iov->iov_base = (char *)xdr->p + (len & 3);
   1200		iov->iov_len  += pad;
   1201		len += pad;
   1202		*xdr->p++ = 0;
   1203	}
   1204	buf->buflen += len;
   1205	buf->len += len;
   1206}
   1207EXPORT_SYMBOL_GPL(xdr_write_pages);
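
/*
 * Zero-copy send sketch for xdr_write_pages() (hypothetical WRITE-style
 * encoder, not part of the original file): emit the opaque length from
 * the head kvec, then splice the caller's pages in as the payload; any
 * required pad word is written through xdr->p into what becomes tail[0].
 */
static int example_encode_write_payload(struct xdr_stream *xdr,
					struct page **pages,
					unsigned int pgbase,
					unsigned int count)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);	/* opaque length word */
	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(count);
	xdr_write_pages(xdr, pages, pgbase, count);
	return 0;
}
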
   1208
   1209static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
   1210				unsigned int base, unsigned int len)
   1211{
   1212	if (len > iov->iov_len)
   1213		len = iov->iov_len;
   1214	if (unlikely(base > len))
   1215		base = len;
   1216	xdr->p = (__be32*)(iov->iov_base + base);
   1217	xdr->end = (__be32*)(iov->iov_base + len);
   1218	xdr->iov = iov;
   1219	xdr->page_ptr = NULL;
   1220	return len - base;
   1221}
   1222
   1223static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
   1224				      unsigned int base, unsigned int len)
   1225{
   1226	struct xdr_buf *buf = xdr->buf;
   1227
   1228	xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
   1229	return xdr_set_iov(xdr, buf->tail, base, len);
   1230}
   1231
   1232static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
   1233				      unsigned int base, unsigned int len)
   1234{
   1235	unsigned int pgnr;
   1236	unsigned int maxlen;
   1237	unsigned int pgoff;
   1238	unsigned int pgend;
   1239	void *kaddr;
   1240
   1241	maxlen = xdr->buf->page_len;
   1242	if (base >= maxlen)
   1243		return 0;
   1244	else
   1245		maxlen -= base;
   1246	if (len > maxlen)
   1247		len = maxlen;
   1248
   1249	xdr_stream_page_set_pos(xdr, base);
   1250	base += xdr->buf->page_base;
   1251
   1252	pgnr = base >> PAGE_SHIFT;
   1253	xdr->page_ptr = &xdr->buf->pages[pgnr];
   1254	kaddr = page_address(*xdr->page_ptr);
   1255
   1256	pgoff = base & ~PAGE_MASK;
   1257	xdr->p = (__be32*)(kaddr + pgoff);
   1258
   1259	pgend = pgoff + len;
   1260	if (pgend > PAGE_SIZE)
   1261		pgend = PAGE_SIZE;
   1262	xdr->end = (__be32*)(kaddr + pgend);
   1263	xdr->iov = NULL;
   1264	return len;
   1265}
   1266
   1267static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
   1268			 unsigned int len)
   1269{
   1270	if (xdr_set_page_base(xdr, base, len) == 0) {
   1271		base -= xdr->buf->page_len;
   1272		xdr_set_tail_base(xdr, base, len);
   1273	}
   1274}
   1275
   1276static void xdr_set_next_page(struct xdr_stream *xdr)
   1277{
   1278	unsigned int newbase;
   1279
   1280	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
   1281	newbase -= xdr->buf->page_base;
   1282	if (newbase < xdr->buf->page_len)
   1283		xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
   1284	else
   1285		xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
   1286}
   1287
   1288static bool xdr_set_next_buffer(struct xdr_stream *xdr)
   1289{
   1290	if (xdr->page_ptr != NULL)
   1291		xdr_set_next_page(xdr);
   1292	else if (xdr->iov == xdr->buf->head)
   1293		xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
   1294	return xdr->p != xdr->end;
   1295}
   1296
   1297/**
   1298 * xdr_init_decode - Initialize an xdr_stream for decoding data.
   1299 * @xdr: pointer to xdr_stream struct
   1300 * @buf: pointer to XDR buffer from which to decode data
   1301 * @p: current pointer inside XDR buffer
   1302 * @rqst: pointer to controlling rpc_rqst, for debugging
   1303 */
   1304void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
   1305		     struct rpc_rqst *rqst)
   1306{
   1307	xdr->buf = buf;
   1308	xdr_reset_scratch_buffer(xdr);
   1309	xdr->nwords = XDR_QUADLEN(buf->len);
   1310	if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
   1311	    xdr_set_page_base(xdr, 0, buf->len) == 0)
   1312		xdr_set_iov(xdr, buf->tail, 0, buf->len);
   1313	if (p != NULL && p > xdr->p && xdr->end >= p) {
   1314		xdr->nwords -= p - xdr->p;
   1315		xdr->p = p;
   1316	}
   1317	xdr->rqst = rqst;
   1318}
   1319EXPORT_SYMBOL_GPL(xdr_init_decode);
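
/*
 * Decode-stream setup sketch (hypothetical helper, not part of the
 * original file): wrap a contiguous reply held in a single kvec.
 * xdr_buf_from_iov() (defined later in this file, declared in
 * sunrpc/xdr.h) fills in the xdr_buf; passing the head base as @p
 * starts decoding at the first word.
 */
static void example_start_decode(struct xdr_stream *xdr,
				 struct xdr_buf *buf, struct kvec *iov)
{
	xdr_buf_from_iov(iov, buf);
	xdr_init_decode(xdr, buf, buf->head[0].iov_base, NULL);
}
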
   1320
   1321/**
   1322 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
   1323 * @xdr: pointer to xdr_stream struct
   1324 * @buf: pointer to XDR buffer from which to decode data
   1325 * @pages: list of pages to decode into
   1326 * @len: length in bytes of buffer in pages
   1327 */
   1328void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
   1329			   struct page **pages, unsigned int len)
   1330{
   1331	memset(buf, 0, sizeof(*buf));
   1332	buf->pages =  pages;
   1333	buf->page_len =  len;
   1334	buf->buflen =  len;
   1335	buf->len = len;
   1336	xdr_init_decode(xdr, buf, NULL, NULL);
   1337}
   1338EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
   1339
   1340static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
   1341{
   1342	unsigned int nwords = XDR_QUADLEN(nbytes);
   1343	__be32 *p = xdr->p;
   1344	__be32 *q = p + nwords;
   1345
   1346	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
   1347		return NULL;
   1348	xdr->p = q;
   1349	xdr->nwords -= nwords;
   1350	return p;
   1351}
   1352
   1353static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
   1354{
   1355	__be32 *p;
   1356	char *cpdest = xdr->scratch.iov_base;
   1357	size_t cplen = (char *)xdr->end - (char *)xdr->p;
   1358
   1359	if (nbytes > xdr->scratch.iov_len)
   1360		goto out_overflow;
   1361	p = __xdr_inline_decode(xdr, cplen);
   1362	if (p == NULL)
   1363		return NULL;
   1364	memcpy(cpdest, p, cplen);
   1365	if (!xdr_set_next_buffer(xdr))
   1366		goto out_overflow;
   1367	cpdest += cplen;
   1368	nbytes -= cplen;
   1369	p = __xdr_inline_decode(xdr, nbytes);
   1370	if (p == NULL)
   1371		return NULL;
   1372	memcpy(cpdest, p, nbytes);
   1373	return xdr->scratch.iov_base;
   1374out_overflow:
   1375	trace_rpc_xdr_overflow(xdr, nbytes);
   1376	return NULL;
   1377}
   1378
   1379/**
   1380 * xdr_inline_decode - Retrieve XDR data to decode
   1381 * @xdr: pointer to xdr_stream struct
   1382 * @nbytes: number of bytes of data to decode
   1383 *
   1384 * Check if the input buffer is long enough to enable us to decode
   1385 * 'nbytes' more bytes of data starting at the current position.
   1386 * If so return the current pointer, then update the current
   1387 * pointer position.
   1388 */
   1389__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
   1390{
   1391	__be32 *p;
   1392
   1393	if (unlikely(nbytes == 0))
   1394		return xdr->p;
   1395	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
   1396		goto out_overflow;
   1397	p = __xdr_inline_decode(xdr, nbytes);
   1398	if (p != NULL)
   1399		return p;
   1400	return xdr_copy_to_scratch(xdr, nbytes);
   1401out_overflow:
   1402	trace_rpc_xdr_overflow(xdr, nbytes);
   1403	return NULL;
   1404}
   1405EXPORT_SYMBOL_GPL(xdr_inline_decode);
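
/*
 * Decode sketch for xdr_inline_decode() (hypothetical caller, not part
 * of the original file): pull one word to learn an opaque's length,
 * then pull the padded body; the second call rounds the request up to
 * whole words internally. Items straddling a buffer boundary come back
 * via the scratch-copy path above, provided a scratch buffer was set
 * with xdr_set_scratch_buffer().
 */
static int example_decode_opaque(struct xdr_stream *xdr, void *out, u32 maxlen)
{
	__be32 *p;
	u32 len;

	p = xdr_inline_decode(xdr, 4);
	if (!p)
		return -EBADMSG;
	len = be32_to_cpup(p);
	if (len > maxlen)
		return -EBADMSG;
	p = xdr_inline_decode(xdr, len);
	if (!p)
		return -EBADMSG;
	memcpy(out, p, len);
	return len;
}
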
   1406
   1407static void xdr_realign_pages(struct xdr_stream *xdr)
   1408{
   1409	struct xdr_buf *buf = xdr->buf;
   1410	struct kvec *iov = buf->head;
   1411	unsigned int cur = xdr_stream_pos(xdr);
   1412	unsigned int copied;
   1413
   1414	/* Realign pages to current pointer position */
   1415	if (iov->iov_len > cur) {
   1416		copied = xdr_shrink_bufhead(buf, cur);
   1417		trace_rpc_xdr_alignment(xdr, cur, copied);
   1418		xdr_set_page(xdr, 0, buf->page_len);
   1419	}
   1420}
   1421
   1422static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
   1423{
   1424	struct xdr_buf *buf = xdr->buf;
   1425	unsigned int nwords = XDR_QUADLEN(len);
   1426	unsigned int copied;
   1427
   1428	if (xdr->nwords == 0)
   1429		return 0;
   1430
   1431	xdr_realign_pages(xdr);
   1432	if (nwords > xdr->nwords) {
   1433		nwords = xdr->nwords;
   1434		len = nwords << 2;
   1435	}
   1436	if (buf->page_len <= len)
   1437		len = buf->page_len;
   1438	else if (nwords < xdr->nwords) {
   1439		/* Truncate page data and move it into the tail */
   1440		copied = xdr_shrink_pagelen(buf, len);
   1441		trace_rpc_xdr_alignment(xdr, len, copied);
   1442	}
   1443	return len;
   1444}
   1445
   1446/**
   1447 * xdr_read_pages - align page-based XDR data to current pointer position
   1448 * @xdr: pointer to xdr_stream struct
   1449 * @len: number of bytes of page data
   1450 *
   1451 * Moves data beyond the current pointer position from the XDR head[] buffer
   1452 * into the page list. Any data that lies beyond current position + @len
   1453 * bytes is moved into the XDR tail[]. The xdr_stream current position is
   1454 * then advanced past that data to align to the next XDR object in the tail.
   1455 *
   1456 * Returns the number of XDR encoded bytes now contained in the pages
   1457 */
   1458unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
   1459{
   1460	unsigned int nwords = XDR_QUADLEN(len);
   1461	unsigned int base, end, pglen;
   1462
   1463	pglen = xdr_align_pages(xdr, nwords << 2);
   1464	if (pglen == 0)
   1465		return 0;
   1466
   1467	base = (nwords << 2) - pglen;
   1468	end = xdr_stream_remaining(xdr) - pglen;
   1469
   1470	xdr_set_tail_base(xdr, base, end);
   1471	return len <= pglen ? len : pglen;
   1472}
   1473EXPORT_SYMBOL_GPL(xdr_read_pages);
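
/*
 * READ-reply sketch for xdr_read_pages() (hypothetical caller, not part
 * of the original file): decode the opaque count from head[0], align
 * the payload so it starts in the page list, then keep decoding the
 * next XDR item from tail[0]. The return value may be smaller than the
 * advertised count if the reply was short.
 */
static int example_decode_read_reply(struct xdr_stream *xdr,
				     unsigned int *count)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (!p)
		return -EBADMSG;
	*count = xdr_read_pages(xdr, be32_to_cpup(p));
	return 0;
}
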
   1474
   1475unsigned int xdr_align_data(struct xdr_stream *xdr, unsigned int offset,
   1476			    unsigned int length)
   1477{
   1478	struct xdr_buf *buf = xdr->buf;
   1479	unsigned int from, bytes, len;
   1480	unsigned int shift;
   1481
   1482	xdr_realign_pages(xdr);
   1483	from = xdr_page_pos(xdr);
   1484
   1485	if (from >= buf->page_len + buf->tail->iov_len)
   1486		return 0;
   1487	if (from + buf->head->iov_len >= buf->len)
   1488		return 0;
   1489
   1490	len = buf->len - buf->head->iov_len;
   1491
   1492	/* We only shift data left! */
   1493	if (WARN_ONCE(from < offset, "SUNRPC: misaligned data src=%u dst=%u\n",
   1494		      from, offset))
   1495		return 0;
   1496	if (WARN_ONCE(offset > buf->page_len,
   1497		      "SUNRPC: buffer overflow. offset=%u, page_len=%u\n",
   1498		      offset, buf->page_len))
   1499		return 0;
   1500
   1501	/* Move page data to the left */
   1502	shift = from - offset;
   1503	xdr_buf_pages_shift_left(buf, from, len, shift);
   1504
   1505	bytes = xdr_stream_remaining(xdr);
   1506	if (length > bytes)
   1507		length = bytes;
   1508	bytes -= length;
   1509
   1510	xdr->buf->len -= shift;
   1511	xdr_set_page(xdr, offset + length, bytes);
   1512	return length;
   1513}
   1514EXPORT_SYMBOL_GPL(xdr_align_data);
   1515
   1516unsigned int xdr_expand_hole(struct xdr_stream *xdr, unsigned int offset,
   1517			     unsigned int length)
   1518{
   1519	struct xdr_buf *buf = xdr->buf;
   1520	unsigned int from, to, shift;
   1521
   1522	xdr_realign_pages(xdr);
   1523	from = xdr_page_pos(xdr);
   1524	to = xdr_align_size(offset + length);
   1525
   1526	/* Could the hole be behind us? */
   1527	if (to > from) {
   1528		unsigned int buflen = buf->len - buf->head->iov_len;
   1529		shift = to - from;
   1530		xdr_buf_try_expand(buf, shift);
   1531		xdr_buf_pages_shift_right(buf, from, buflen, shift);
   1532		xdr_set_page(xdr, to, xdr_stream_remaining(xdr));
   1533	} else if (to != from)
   1534		xdr_align_data(xdr, to, 0);
   1535	xdr_buf_pages_zero(buf, offset, length);
   1536
   1537	return length;
   1538}
   1539EXPORT_SYMBOL_GPL(xdr_expand_hole);
   1540
   1541/**
   1542 * xdr_enter_page - decode data from the XDR page
   1543 * @xdr: pointer to xdr_stream struct
   1544 * @len: number of bytes of page data
   1545 *
   1546 * Moves data beyond the current pointer position from the XDR head[] buffer
   1547 * into the page list. Any data that lies beyond current position + "len"
   1548 * bytes is moved into the XDR tail[]. The current pointer is then
   1549 * repositioned at the beginning of the first XDR page.
   1550 */
   1551void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
   1552{
   1553	len = xdr_align_pages(xdr, len);
   1554	/*
    1555	 * Position current pointer at beginning of the page data, and
   1556	 * set remaining message length.
   1557	 */
   1558	if (len != 0)
   1559		xdr_set_page_base(xdr, 0, len);
   1560}
   1561EXPORT_SYMBOL_GPL(xdr_enter_page);
   1562
   1563static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
   1564
   1565void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
   1566{
   1567	buf->head[0] = *iov;
   1568	buf->tail[0] = empty_iov;
   1569	buf->page_len = 0;
   1570	buf->buflen = buf->len = iov->iov_len;
   1571}
   1572EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
   1573
   1574/**
   1575 * xdr_buf_subsegment - set subbuf to a portion of buf
   1576 * @buf: an xdr buffer
   1577 * @subbuf: the result buffer
   1578 * @base: beginning of range in bytes
   1579 * @len: length of range in bytes
   1580 *
   1581 * sets @subbuf to an xdr buffer representing the portion of @buf of
   1582 * length @len starting at offset @base.
   1583 *
   1584 * @buf and @subbuf may be pointers to the same struct xdr_buf.
   1585 *
    1586 * Returns -1 if base or length are out of bounds.
   1587 */
   1588int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
   1589		       unsigned int base, unsigned int len)
   1590{
   1591	subbuf->buflen = subbuf->len = len;
   1592	if (base < buf->head[0].iov_len) {
   1593		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
   1594		subbuf->head[0].iov_len = min_t(unsigned int, len,
   1595						buf->head[0].iov_len - base);
   1596		len -= subbuf->head[0].iov_len;
   1597		base = 0;
   1598	} else {
   1599		base -= buf->head[0].iov_len;
   1600		subbuf->head[0].iov_base = buf->head[0].iov_base;
   1601		subbuf->head[0].iov_len = 0;
   1602	}
   1603
   1604	if (base < buf->page_len) {
   1605		subbuf->page_len = min(buf->page_len - base, len);
   1606		base += buf->page_base;
   1607		subbuf->page_base = base & ~PAGE_MASK;
   1608		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
   1609		len -= subbuf->page_len;
   1610		base = 0;
   1611	} else {
   1612		base -= buf->page_len;
   1613		subbuf->pages = buf->pages;
   1614		subbuf->page_base = 0;
   1615		subbuf->page_len = 0;
   1616	}
   1617
   1618	if (base < buf->tail[0].iov_len) {
   1619		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
   1620		subbuf->tail[0].iov_len = min_t(unsigned int, len,
   1621						buf->tail[0].iov_len - base);
   1622		len -= subbuf->tail[0].iov_len;
   1623		base = 0;
   1624	} else {
   1625		base -= buf->tail[0].iov_len;
   1626		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
   1627		subbuf->tail[0].iov_len = 0;
   1628	}
   1629
   1630	if (base || len)
   1631		return -1;
   1632	return 0;
   1633}
   1634EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
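
/*
 * Extraction sketch for xdr_buf_subsegment() (hypothetical offsets, not
 * part of the original file): carve a byte range, e.g. a GSS-wrapped
 * payload, out of a larger buffer so it can be processed in place
 * without copying.
 */
static int example_extract_range(const struct xdr_buf *buf,
				 struct xdr_buf *payload,
				 unsigned int offset, unsigned int len)
{
	if (xdr_buf_subsegment(buf, payload, offset, len))
		return -ERANGE;	/* range falls outside @buf */
	return 0;
}
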
   1635
   1636/**
   1637 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
   1638 * @xdr: an xdr_stream set up for decoding
   1639 * @subbuf: the result buffer
   1640 * @nbytes: length of @xdr to extract, in bytes
   1641 *
   1642 * Sets up @subbuf to represent a portion of @xdr. The portion
   1643 * starts at the current offset in @xdr, and extends for a length
   1644 * of @nbytes. If this is successful, @xdr is advanced to the next
   1645 * XDR data item following that portion.
   1646 *
   1647 * Return values:
   1648 *   %true: @subbuf has been initialized, and @xdr has been advanced.
   1649 *   %false: a bounds error has occurred
   1650 */
   1651bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
   1652			   unsigned int nbytes)
   1653{
   1654	unsigned int start = xdr_stream_pos(xdr);
   1655	unsigned int remaining, len;
   1656
   1657	/* Extract @subbuf and bounds-check the fn arguments */
   1658	if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
   1659		return false;
   1660
   1661	/* Advance @xdr by @nbytes */
   1662	for (remaining = nbytes; remaining;) {
   1663		if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
   1664			return false;
   1665
   1666		len = (char *)xdr->end - (char *)xdr->p;
   1667		if (remaining <= len) {
   1668			xdr->p = (__be32 *)((char *)xdr->p +
   1669					(remaining + xdr_pad_size(nbytes)));
   1670			break;
   1671		}
   1672
   1673		xdr->p = (__be32 *)((char *)xdr->p + len);
   1674		xdr->end = xdr->p;
   1675		remaining -= len;
   1676	}
   1677
   1678	xdr_stream_set_pos(xdr, start + nbytes);
   1679	return true;
   1680}
   1681EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
   1682
   1683/**
   1684 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
   1685 * @buf: buf to be trimmed
   1686 * @len: number of bytes to reduce "buf" by
   1687 *
   1688 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
   1689 * that it's possible that we'll trim less than that amount if the xdr_buf is
   1690 * too small, or if (for instance) it's all in the head and the parser has
   1691 * already read too far into it.
   1692 */
   1693void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
   1694{
   1695	size_t cur;
   1696	unsigned int trim = len;
   1697
   1698	if (buf->tail[0].iov_len) {
   1699		cur = min_t(size_t, buf->tail[0].iov_len, trim);
   1700		buf->tail[0].iov_len -= cur;
   1701		trim -= cur;
   1702		if (!trim)
   1703			goto fix_len;
   1704	}
   1705
   1706	if (buf->page_len) {
   1707		cur = min_t(unsigned int, buf->page_len, trim);
   1708		buf->page_len -= cur;
   1709		trim -= cur;
   1710		if (!trim)
   1711			goto fix_len;
   1712	}
   1713
   1714	if (buf->head[0].iov_len) {
   1715		cur = min_t(size_t, buf->head[0].iov_len, trim);
   1716		buf->head[0].iov_len -= cur;
   1717		trim -= cur;
   1718	}
   1719fix_len:
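       	/* (len - trim) is the number of bytes actually removed */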
   1720	buf->len -= (len - trim);
   1721}
   1722EXPORT_SYMBOL_GPL(xdr_buf_trim);
   1723
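       /*
        * Example (sketch): drop a 4-byte trailer that a lower layer left
        * after the XDR payload, as a GSS unwrap step might; fewer bytes
        * are trimmed if the buffer is already shorter than that.
        * demo_strip_trailer is hypothetical.
        */
       static void __maybe_unused demo_strip_trailer(struct xdr_buf *buf)
       {
       	xdr_buf_trim(buf, 4);	/* fixes up kvec/page lengths and buf->len */
       }
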
   1724static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
   1725				      void *obj, unsigned int len)
   1726{
   1727	unsigned int this_len;
   1728
   1729	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
   1730	memcpy(obj, subbuf->head[0].iov_base, this_len);
   1731	len -= this_len;
   1732	obj += this_len;
   1733	this_len = min_t(unsigned int, len, subbuf->page_len);
   1734	_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
   1735	len -= this_len;
   1736	obj += this_len;
   1737	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
   1738	memcpy(obj, subbuf->tail[0].iov_base, this_len);
   1739}
   1740
   1741/* obj is assumed to point to allocated memory of size at least len */
   1742int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
   1743			    void *obj, unsigned int len)
   1744{
   1745	struct xdr_buf subbuf;
   1746	int status;
   1747
   1748	status = xdr_buf_subsegment(buf, &subbuf, base, len);
   1749	if (status != 0)
   1750		return status;
   1751	__read_bytes_from_xdr_buf(&subbuf, obj, len);
   1752	return 0;
   1753}
   1754EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
   1755
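       /*
        * Example (sketch): linearize an 8-byte cookie that may straddle
        * the head/pages/tail boundaries of @buf. demo_read_cookie is
        * hypothetical.
        */
       static int __maybe_unused
       demo_read_cookie(const struct xdr_buf *buf, unsigned int base, u64 *cookie)
       {
       	__be64 raw;
       	int err;

       	err = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(raw));
       	if (err)
       		return err;
       	*cookie = be64_to_cpu(raw);
       	return 0;
       }
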
   1756static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
   1757				     void *obj, unsigned int len)
   1758{
   1759	unsigned int this_len;
   1760
   1761	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
   1762	memcpy(subbuf->head[0].iov_base, obj, this_len);
   1763	len -= this_len;
   1764	obj += this_len;
   1765	this_len = min_t(unsigned int, len, subbuf->page_len);
   1766	_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
   1767	len -= this_len;
   1768	obj += this_len;
   1769	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
   1770	memcpy(subbuf->tail[0].iov_base, obj, this_len);
   1771}
   1772
   1773/* obj is assumed to point to allocated memory of size at least len */
   1774int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
   1775			   void *obj, unsigned int len)
   1776{
   1777	struct xdr_buf subbuf;
   1778	int status;
   1779
   1780	status = xdr_buf_subsegment(buf, &subbuf, base, len);
   1781	if (status != 0)
   1782		return status;
   1783	__write_bytes_to_xdr_buf(&subbuf, obj, len);
   1784	return 0;
   1785}
   1786EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
   1787
   1788int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
   1789{
   1790	__be32	raw;
   1791	int	status;
   1792
   1793	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
   1794	if (status)
   1795		return status;
   1796	*obj = be32_to_cpu(raw);
   1797	return 0;
   1798}
   1799EXPORT_SYMBOL_GPL(xdr_decode_word);
   1800
   1801int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
   1802{
   1803	__be32	raw = cpu_to_be32(obj);
   1804
   1805	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
   1806}
   1807EXPORT_SYMBOL_GPL(xdr_encode_word);
   1808
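       /*
        * Example (sketch): re-read a count word encoded earlier and
        * rewrite it with one more entry, as an encoder might when
        * backfilling a reserved slot. demo_bump_count is hypothetical.
        */
       static int __maybe_unused
       demo_bump_count(const struct xdr_buf *buf, unsigned int offset)
       {
       	u32 count;
       	int err;

       	err = xdr_decode_word(buf, offset, &count);
       	if (err)
       		return err;
       	return xdr_encode_word(buf, offset, count + 1);
       }
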
   1809/* Returns 0 on success, or else a negative error code. */
   1810static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
   1811			    struct xdr_array2_desc *desc, int encode)
   1812{
   1813	char *elem = NULL, *c;
   1814	unsigned int copied = 0, todo, avail_here;
   1815	struct page **ppages = NULL;
   1816	int err;
   1817
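       	/*
       	 * "elem" is a bounce buffer for elements that straddle a
       	 * segment or page boundary; "copied" counts how many bytes of
       	 * the element currently being processed are staged in it.
       	 */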
   1818	if (encode) {
   1819		if (xdr_encode_word(buf, base, desc->array_len) != 0)
   1820			return -EINVAL;
   1821	} else {
   1822		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
   1823		    desc->array_len > desc->array_maxlen ||
   1824		    (unsigned long) base + 4 + desc->array_len *
   1825				    desc->elem_size > buf->len)
   1826			return -EINVAL;
   1827	}
   1828	base += 4;
   1829
   1830	if (!desc->xcode)
   1831		return 0;
   1832
   1833	todo = desc->array_len * desc->elem_size;
   1834
   1835	/* process head */
   1836	if (todo && base < buf->head->iov_len) {
   1837		c = buf->head->iov_base + base;
   1838		avail_here = min_t(unsigned int, todo,
   1839				   buf->head->iov_len - base);
   1840		todo -= avail_here;
   1841
   1842		while (avail_here >= desc->elem_size) {
   1843			err = desc->xcode(desc, c);
   1844			if (err)
   1845				goto out;
   1846			c += desc->elem_size;
   1847			avail_here -= desc->elem_size;
   1848		}
   1849		if (avail_here) {
   1850			if (!elem) {
   1851				elem = kmalloc(desc->elem_size, GFP_KERNEL);
   1852				err = -ENOMEM;
   1853				if (!elem)
   1854					goto out;
   1855			}
   1856			if (encode) {
   1857				err = desc->xcode(desc, elem);
   1858				if (err)
   1859					goto out;
   1860				memcpy(c, elem, avail_here);
   1861			} else
   1862				memcpy(elem, c, avail_here);
   1863			copied = avail_here;
   1864		}
   1865		base = buf->head->iov_len;  /* align to start of pages */
   1866	}
   1867
   1868	/* process pages array */
   1869	base -= buf->head->iov_len;
   1870	if (todo && base < buf->page_len) {
   1871		unsigned int avail_page;
   1872
   1873		avail_here = min(todo, buf->page_len - base);
   1874		todo -= avail_here;
   1875
   1876		base += buf->page_base;
   1877		ppages = buf->pages + (base >> PAGE_SHIFT);
   1878		base &= ~PAGE_MASK;
   1879		avail_page = min_t(unsigned int, PAGE_SIZE - base,
   1880					avail_here);
   1881		c = kmap(*ppages) + base;
   1882
   1883		while (avail_here) {
   1884			avail_here -= avail_page;
   1885			if (copied || avail_page < desc->elem_size) {
   1886				unsigned int l = min(avail_page,
   1887					desc->elem_size - copied);
   1888				if (!elem) {
   1889					elem = kmalloc(desc->elem_size,
   1890						       GFP_KERNEL);
   1891					err = -ENOMEM;
   1892					if (!elem)
   1893						goto out;
   1894				}
   1895				if (encode) {
   1896					if (!copied) {
   1897						err = desc->xcode(desc, elem);
   1898						if (err)
   1899							goto out;
   1900					}
   1901					memcpy(c, elem + copied, l);
   1902					copied += l;
   1903					if (copied == desc->elem_size)
   1904						copied = 0;
   1905				} else {
   1906					memcpy(elem + copied, c, l);
   1907					copied += l;
   1908					if (copied == desc->elem_size) {
   1909						err = desc->xcode(desc, elem);
   1910						if (err)
   1911							goto out;
   1912						copied = 0;
   1913					}
   1914				}
   1915				avail_page -= l;
   1916				c += l;
   1917			}
   1918			while (avail_page >= desc->elem_size) {
   1919				err = desc->xcode(desc, c);
   1920				if (err)
   1921					goto out;
   1922				c += desc->elem_size;
   1923				avail_page -= desc->elem_size;
   1924			}
   1925			if (avail_page) {
   1926				unsigned int l = min(avail_page,
   1927					    desc->elem_size - copied);
   1928				if (!elem) {
   1929					elem = kmalloc(desc->elem_size,
   1930						       GFP_KERNEL);
   1931					err = -ENOMEM;
   1932					if (!elem)
   1933						goto out;
   1934				}
   1935				if (encode) {
   1936					if (!copied) {
   1937						err = desc->xcode(desc, elem);
   1938						if (err)
   1939							goto out;
   1940					}
   1941					memcpy(c, elem + copied, l);
   1942					copied += l;
   1943					if (copied == desc->elem_size)
   1944						copied = 0;
   1945				} else {
   1946					memcpy(elem + copied, c, l);
   1947					copied += l;
   1948					if (copied == desc->elem_size) {
   1949						err = desc->xcode(desc, elem);
   1950						if (err)
   1951							goto out;
   1952						copied = 0;
   1953					}
   1954				}
   1955			}
   1956			if (avail_here) {
   1957				kunmap(*ppages);
   1958				ppages++;
   1959				c = kmap(*ppages);
   1960			}
   1961
   1962			avail_page = min(avail_here,
   1963				 (unsigned int) PAGE_SIZE);
   1964		}
   1965		base = buf->page_len;  /* align to start of tail */
   1966	}
   1967
   1968	/* process tail */
   1969	base -= buf->page_len;
   1970	if (todo) {
   1971		c = buf->tail->iov_base + base;
   1972		if (copied) {
   1973			unsigned int l = desc->elem_size - copied;
   1974
   1975			if (encode)
   1976				memcpy(c, elem + copied, l);
   1977			else {
   1978				memcpy(elem + copied, c, l);
   1979				err = desc->xcode(desc, elem);
   1980				if (err)
   1981					goto out;
   1982			}
   1983			todo -= l;
   1984			c += l;
   1985		}
   1986		while (todo) {
   1987			err = desc->xcode(desc, c);
   1988			if (err)
   1989				goto out;
   1990			c += desc->elem_size;
   1991			todo -= desc->elem_size;
   1992		}
   1993	}
   1994	err = 0;
   1995
   1996out:
   1997	kfree(elem);
   1998	if (ppages)
   1999		kunmap(*ppages);
   2000	return err;
   2001}
   2002
   2003int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
   2004		      struct xdr_array2_desc *desc)
   2005{
   2006	if (base >= buf->len)
   2007		return -EINVAL;
   2008
   2009	return xdr_xcode_array2(buf, base, desc, 0);
   2010}
   2011EXPORT_SYMBOL_GPL(xdr_decode_array2);
   2012
   2013int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
   2014		      struct xdr_array2_desc *desc)
   2015{
   2016	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
   2017	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
   2018		return -EINVAL;
   2019
   2020	return xdr_xcode_array2(buf, base, desc, 1);
   2021}
   2022EXPORT_SYMBOL_GPL(xdr_encode_array2);
   2023
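       /*
        * Example (sketch): decode a counted array of 32-bit IDs via
        * xdr_decode_array2(). The xcode callback runs once per element;
        * when decoding it reads elem_size bytes at @elem, when encoding
        * it would fill them in. All demo_* names are hypothetical, and
        * ids[] must have room for array_maxlen entries.
        */
       struct demo_id_array {
       	struct xdr_array2_desc desc;
       	u32 *ids;
       	unsigned int nr;
       };

       static int demo_xcode_id(struct xdr_array2_desc *desc, void *elem)
       {
       	struct demo_id_array *array =
       		container_of(desc, struct demo_id_array, desc);

       	array->ids[array->nr++] = be32_to_cpup(elem);
       	return 0;
       }

       static int __maybe_unused
       demo_decode_ids(const struct xdr_buf *buf, unsigned int base,
       		struct demo_id_array *array)
       {
       	array->desc.elem_size = sizeof(__be32);
       	array->desc.array_maxlen = 16;	/* arbitrary sanity cap */
       	array->desc.xcode = demo_xcode_id;
       	array->nr = 0;
       	return xdr_decode_array2(buf, base, &array->desc);
       }
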
   2024int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
   2025		    unsigned int len,
   2026		    int (*actor)(struct scatterlist *, void *), void *data)
   2027{
   2028	int i, ret = 0;
   2029	unsigned int page_len, thislen, page_offset;
   2030	struct scatterlist      sg[1];
   2031
   2032	sg_init_table(sg, 1);
   2033
   2034	if (offset >= buf->head[0].iov_len) {
   2035		offset -= buf->head[0].iov_len;
   2036	} else {
   2037		thislen = buf->head[0].iov_len - offset;
   2038		if (thislen > len)
   2039			thislen = len;
   2040		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
   2041		ret = actor(sg, data);
   2042		if (ret)
   2043			goto out;
   2044		offset = 0;
   2045		len -= thislen;
   2046	}
   2047	if (len == 0)
   2048		goto out;
   2049
   2050	if (offset >= buf->page_len) {
   2051		offset -= buf->page_len;
   2052	} else {
   2053		page_len = buf->page_len - offset;
   2054		if (page_len > len)
   2055			page_len = len;
   2056		len -= page_len;
   2057		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
   2058		i = (offset + buf->page_base) >> PAGE_SHIFT;
   2059		thislen = PAGE_SIZE - page_offset;
   2060		do {
   2061			if (thislen > page_len)
   2062				thislen = page_len;
   2063			sg_set_page(sg, buf->pages[i], thislen, page_offset);
   2064			ret = actor(sg, data);
   2065			if (ret)
   2066				goto out;
   2067			page_len -= thislen;
   2068			i++;
   2069			page_offset = 0;
   2070			thislen = PAGE_SIZE;
   2071		} while (page_len != 0);
   2072		offset = 0;
   2073	}
   2074	if (len == 0)
   2075		goto out;
   2076	if (offset < buf->tail[0].iov_len) {
   2077		thislen = buf->tail[0].iov_len - offset;
   2078		if (thislen > len)
   2079			thislen = len;
   2080		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
   2081		ret = actor(sg, data);
   2082		len -= thislen;
   2083	}
   2084	if (len != 0)
   2085		ret = -EINVAL;
   2086out:
   2087	return ret;
   2088}
   2089EXPORT_SYMBOL_GPL(xdr_process_buf);
   2090
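       /*
        * Example (sketch): an actor that byte-sums a range of an xdr_buf,
        * standing in where the GSS code would pass a checksum actor.
        * kmap() matches this file's page-access style; sg entries built
        * from the pages array never cross a page boundary, and head/tail
        * kvecs are assumed to be lowmem. demo_sum_actor is hypothetical.
        *
        * Typical call:
        *	u32 sum = 0;
        *	xdr_process_buf(buf, 0, buf->len, demo_sum_actor, &sum);
        */
       static int __maybe_unused demo_sum_actor(struct scatterlist *sg, void *data)
       {
       	u32 *sum = data;
       	const u8 *p = kmap(sg_page(sg));
       	unsigned int i;

       	for (i = 0; i < sg->length; i++)
       		*sum += p[sg->offset + i];
       	kunmap(sg_page(sg));
       	return 0;	/* a nonzero return would abort the walk */
       }
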
   2091/**
   2092 * xdr_stream_decode_opaque - Decode variable length opaque
   2093 * @xdr: pointer to xdr_stream
   2094 * @ptr: location to store opaque data
   2095 * @size: size of storage buffer @ptr
   2096 *
   2097 * Return values:
   2098 *   On success, returns size of object stored in *@ptr
   2099 *   %-EBADMSG on XDR buffer overflow
   2100 *   %-EMSGSIZE on overflow of storage buffer @ptr
   2101 */
   2102ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
   2103{
   2104	ssize_t ret;
   2105	void *p;
   2106
   2107	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
   2108	if (ret <= 0)
   2109		return ret;
   2110	memcpy(ptr, p, ret);
   2111	return ret;
   2112}
   2113EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
   2114
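       /*
        * Example (sketch): copy a variable-length session ID into a fixed
        * 16-byte buffer; any on-the-wire length above 16 fails with
        * -EMSGSIZE. demo_decode_session_id is hypothetical.
        */
       static ssize_t __maybe_unused
       demo_decode_session_id(struct xdr_stream *xdr, u8 id[16])
       {
       	return xdr_stream_decode_opaque(xdr, id, 16);
       }
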
   2115/**
   2116 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
   2117 * @xdr: pointer to xdr_stream
   2118 * @ptr: location to store pointer to opaque data
   2119 * @maxlen: maximum acceptable object size
   2120 * @gfp_flags: GFP mask to use
   2121 *
   2122 * Return values:
   2123 *   On success, returns size of object stored in *@ptr
   2124 *   %-EBADMSG on XDR buffer overflow
   2125 *   %-EMSGSIZE if the size of the object would exceed @maxlen
   2126 *   %-ENOMEM on memory allocation failure
   2127 */
   2128ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
   2129		size_t maxlen, gfp_t gfp_flags)
   2130{
   2131	ssize_t ret;
   2132	void *p;
   2133
   2134	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
   2135	if (ret > 0) {
   2136		*ptr = kmemdup(p, ret, gfp_flags);
   2137		if (*ptr != NULL)
   2138			return ret;
   2139		ret = -ENOMEM;
   2140	}
   2141	*ptr = NULL;
   2142	return ret;
   2143}
   2144EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
   2145
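       /*
        * Example (sketch): take a heap copy of an opaque body, bounded at
        * 1024 bytes, and release it when done; the caller owns the
        * duplicate. demo_copy_opaque is hypothetical.
        */
       static ssize_t __maybe_unused demo_copy_opaque(struct xdr_stream *xdr)
       {
       	void *blob;
       	ssize_t len;

       	len = xdr_stream_decode_opaque_dup(xdr, &blob, 1024, GFP_KERNEL);
       	if (len > 0)
       		kfree(blob);	/* a real caller would use blob first */
       	return len;
       }
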
   2146/**
   2147 * xdr_stream_decode_string - Decode variable length string
   2148 * @xdr: pointer to xdr_stream
   2149 * @str: location to store string
   2150 * @size: maximum acceptable string length; @str must hold @size + 1 bytes
   2151 *
   2152 * Return values:
   2153 *   On success, returns length of NUL-terminated string stored in *@str
   2154 *   %-EBADMSG on XDR buffer overflow
   2155 *   %-EMSGSIZE on overflow of storage buffer @str
   2156 */
   2157ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
   2158{
   2159	ssize_t ret;
   2160	void *p;
   2161
   2162	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
   2163	if (ret > 0) {
   2164		memcpy(str, p, ret);
   2165		str[ret] = '\0';
   2166		return strlen(str);
   2167	}
   2168	*str = '\0';
   2169	return ret;
   2170}
   2171EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
   2172
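       /*
        * Example (sketch): decode a short name into a fixed 64-byte
        * buffer; passing 63 leaves room for the terminating NUL that
        * xdr_stream_decode_string() appends. demo_decode_name is
        * hypothetical.
        */
       static ssize_t __maybe_unused
       demo_decode_name(struct xdr_stream *xdr, char name[64])
       {
       	return xdr_stream_decode_string(xdr, name, 63);
       }
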
   2173/**
   2174 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
   2175 * @xdr: pointer to xdr_stream
   2176 * @str: location to store pointer to string
   2177 * @maxlen: maximum acceptable string length
   2178 * @gfp_flags: GFP mask to use
   2179 *
   2180 * Return values:
   2181 *   On success, returns length of NUL-terminated string stored in *@str
   2182 *   %-EBADMSG on XDR buffer overflow
   2183 *   %-EMSGSIZE if the size of the string would exceed @maxlen
   2184 *   %-ENOMEM on memory allocation failure
   2185 */
   2186ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
   2187		size_t maxlen, gfp_t gfp_flags)
   2188{
   2189	void *p;
   2190	ssize_t ret;
   2191
   2192	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
   2193	if (ret > 0) {
   2194		char *s = kmemdup_nul(p, ret, gfp_flags);
   2195		if (s != NULL) {
   2196			*str = s;
   2197			return strlen(s);
   2198		}
   2199		ret = -ENOMEM;
   2200	}
   2201	*str = NULL;
   2202	return ret;
   2203}
   2204EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
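
       /*
        * Example (sketch): duplicate a NUL-terminated name of at most 255
        * bytes; the caller must kfree() the result. demo_dup_name and the
        * 255 cap are hypothetical.
        */
       static char * __maybe_unused demo_dup_name(struct xdr_stream *xdr)
       {
       	char *name;

       	if (xdr_stream_decode_string_dup(xdr, &name, 255, GFP_KERNEL) < 0)
       		return NULL;
       	return name;
       }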