cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gss_krb5_wrap.c (17471B)
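Kernel source net/sunrpc/auth_gss/gss_krb5_wrap.c: wrap and unwrap (privacy) token handling for the SunRPC Kerberos 5 GSS-API mechanism.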


/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

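/*
 * The v1 (RFC 1964) token format pads the plaintext out to the cipher
 * blocksize.  As in PKCS#7, every pad byte holds the pad length, so the
 * receiver can recover it from the last decrypted byte.
 */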
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

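/*
 * Strip the padding added above: find the last byte of the message,
 * which may live in the head, the page list or the tail of the xdr_buf,
 * read the pad length from it and shrink buf->len accordingly.
 */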
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>>PAGE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

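/*
 * Fill the confounder that prefixes the encrypted data.  conflen comes
 * from the enctype definition (gk5e->conflen): 8 bytes for the DES-based
 * enctypes, 16 for the AES enctypes.
 */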
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		fallthrough;
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

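/*
 * Wrap (seal) a request into an RFC 1964 WRAP token.  After the generic
 * GSS-API framing the token carries:
 *
 *	TOK_ID(2) SGN_ALG(2) SEAL_ALG(2) filler(2) SND_SEQ(8) SGN_CKSUM
 *	confounder | caller's data | blocksize padding
 *
 * The checksum covers the first eight header bytes plus the plaintext;
 * SND_SEQ is encrypted by krb5_make_seq_num() and the confounder, data
 * and padding are encrypted in place by gss_encrypt_xdr_buf().
 */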
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	time64_t		now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = ktime_get_real_seconds();

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);


	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	seq_send = atomic_fetch_inc(&kctx->seq_send);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf,
				offset + headlen - conflen, pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

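/*
 * Unwrap an RFC 1964 WRAP token: verify the framing and algorithm
 * fields, decrypt the body, recompute and compare the checksum, check
 * context expiry and the sequence-number direction, then slide the
 * payload back to where the upper layers expect it and strip the
 * blocksize padding.
 */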
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
		       struct xdr_buf *buf, unsigned int *slack,
		       unsigned int *align)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	time64_t		now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;
	unsigned int		saved_len = buf->len;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	buf->len = len;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = ktime_get_real_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len = len - (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	/* slack must include room for krb5 padding */
	*slack = XDR_QUADLEN(saved_len - buf->len);
	/* The GSS blob always precedes the RPC message payload */
	*align = *slack;
	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

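/*
 * RFC 4121 lets the sender right-rotate the encrypted part of the token
 * by RRC bytes.  rotate_left() undoes that by rotating everything after
 * "base" (the 16-byte token header) left by the same amount.
 */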
static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

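/*
 * Wrap using the RFC 4121 token format.  The 16-byte header built here
 * is:
 *
 *	TOK_ID(2) | Flags(1) | Filler(0xff) | EC(2) | RRC(2) | SND_SEQ(8)
 *
 * The enctype-specific ->encrypt_v2() callback then encrypts the payload
 * (which, for the AES enctypes, ends with a copy of this header) and
 * appends the checksum.
 */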
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr;
	time64_t	now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = ktime_get_real_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

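/*
 * Unwrap an RFC 4121 token: check TOK_ID, flags and filler, undo any
 * RRC rotation, decrypt via ->decrypt_v2(), compare the decrypted copy
 * of the token header against the plaintext one, check expiry, then
 * realign the payload and trim the EC bytes and checksum off the end.
 */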
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
		       struct xdr_buf *buf, unsigned int *slack,
		       unsigned int *align)
{
	time64_t	now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;


	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */

	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = ktime_get_real_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}

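/*
 * Entry points used by the RPCSEC_GSS layer (via the krb5 gss_api_ops):
 * the "RAW" DES/DES3 enctypes take the RFC 1964 v1 path, the AES
 * enctypes the RFC 4121 v2 path.
 */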
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
		    int len, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
					      &gctx->slack, &gctx->align);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
					      &gctx->slack, &gctx->align);
	}
}