cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tls_device_fallback.c (13259B)


/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>

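/* Turn the current position of a scatter walk into the head of a new
 * scatterlist: one entry covering the rest of the walk's current buffer,
 * chained to the remainder of the original list.
 */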
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

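/* Re-encrypt one TLS record: copy the TLS header and explicit IV from
 * @in to @out, rebuild the AAD and AES-GCM nonce from them, then encrypt
 * the record payload from @in into @out. @in_len is reduced by the
 * amount of input consumed; a short final record yields a garbage tag
 * that the caller never transmits.
 */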
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len,
			  struct tls_prot_info *prot)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		(char *)&rcd_sn, buf[0], prot);

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record;
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than the input buffer length + tag size.
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

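/* Allocate an AEAD request sized for @aead's private context and
 * initialize it for TLS record encryption.
 */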
static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}

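/* Walk @sg_in and @sg_out, re-encrypting @len bytes record by record
 * and bumping the record sequence number after each one.
 */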
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len, struct tls_prot_info *prot)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}

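/* Finish the re-encrypted clone: copy headers and socket ownership from
 * @skb to @nskb, fix up the TCP checksum, and account any truesize
 * difference against the socket's write memory.
 */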
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	/* sock_efree means skb must have gone through skb_orphan_partial() */
	if (nskb->destructor == sock_efree)
		return;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}

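/* Lay out the three encryption outputs: the re-encrypted prefix of the
 * first record (@sync_size bytes) goes to the scratch @dummy_buf, the
 * packet payload goes into @nskb, and extra room for a trailing
 * authentication tag follows in the scratch buffer.
 */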
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}

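/* Allocate a fresh skb and scratch memory, then re-encrypt the records
 * covering @skb in software. The ciphertext for the @sync_size bytes
 * preceding this packet is discarded, since those bytes were already
 * transmitted.
 */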
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len,
			    &tls_ctx->prot_info) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}

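/* Software fallback for TLS TX offload: reconstruct the cleartext records
 * that overlap @skb, re-encrypt them, and hand back a new skb carrying
 * valid ciphertext. The original skb is consumed either way.
 */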
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	if (nskb)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return nskb;
}

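/* Let the skb pass untouched if it leaves through the device that holds
 * the TLS offload state (or its bond master); otherwise encrypt it in
 * software.
 */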
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == tls_get_ctx(sk)->netdev || netif_is_bond_master(dev))
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

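/* Software-only variant: always take the fallback path regardless of the
 * transmitting device.
 */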
struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
					 struct net_device *dev,
					 struct sk_buff *skb)
{
	return tls_sw_fallback(sk, skb);
}

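/* Helper for drivers: encrypt @skb in software using the TLS state of
 * its socket.
 */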
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
	return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

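/* Prepare the software fallback: allocate a synchronous "gcm(aes)"
 * transform and program it with the connection's AES-GCM-128 key and
 * tag size.
 */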
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}