cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

chtls_hw.c (12034B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tls.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

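/*
 * Build a CPL_SET_TCB_FIELD work request in place: fill the WR header for
 * the connection's tid, select the RSS queue for the (optional) reply,
 * encode the TCB word/cookie plus mask and value, and pad the request to a
 * 16-byte multiple with a ULP_TX_SC_NOOP sub-command.
 */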
static void __set_tcb_field_direct(struct chtls_sock *csk,
				   struct cpl_set_tcb_field *req, u16 word,
				   u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct ulptx_idata *sc;

	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
	req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
				QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
	sc->len = htonl(0);
}

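/*
 * Reserve room for the request in @skb, fill it via __set_tcb_field_direct()
 * and mark the skb for the control queue of the connection's port.
 */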
static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
			    u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	unsigned int wrlen;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
	csk = rcu_dereference_sk_user_data(sk);

	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	__set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
}

/*
 * Send a control message to the HW; the message goes as immediate data and
 * the packet is freed immediately.
 */
static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
{
	struct cpl_set_tcb_field *req;
	unsigned int credits_needed;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);

	skb = alloc_skb(wrlen, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	credits_needed = DIV_ROUND_UP(wrlen, 16);
	csk = rcu_dereference_sk_user_data(sk);

	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	csk->wr_credits -= credits_needed;
	csk->wr_unacked += credits_needed;
	enqueue_wr(csk, skb);
	ret = cxgb4_ofld_send(csk->egress_dev, skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

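/*
 * Like chtls_set_tcb_field(), but request a reply tagged with @cookie and
 * hand the skb to send_or_defer(), which transmits it via the L2T entry
 * when @through_l2t is set.  The __GFP_NOFAIL allocation cannot fail, so
 * the NULL check below is only defensive.
 */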
void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
				 u64 mask, u64 val, u8 cookie,
				 int through_l2t)
{
	struct sk_buff *skb;
	unsigned int wrlen;

	wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
	wrlen = roundup(wrlen, 16);

	skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return;

	__set_tcb_field(sk, skb, word, mask, val, cookie, 0);
	send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
}

/*
 * Set one of the t_flags bits in the TCB.
 */
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
	return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
				   (u64)val << bit_pos);
}

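/* Program the RX TLS key id into TCB word 31 (full 32-bit field). */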
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
	return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
}

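/* Zero the TLS sequence-number field (TCB word 28, per the function name). */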
static int chtls_set_tcb_seqno(struct sock *sk)
{
	return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
}

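/* Set or clear the RX-quiesce flag in the t_flags TCB word. */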
static int chtls_set_tcb_quiesce(struct sock *sk, int val)
{
	return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
				   TF_RX_QUIESCE_V(val));
}

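/*
 * Clear the RX-quiesce flag directly on the control queue, without WR
 * credit accounting and without asking for a reply.  Note that @val is
 * currently unused: the request always writes zero under the quiesce mask.
 */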
void chtls_set_quiesce_ctrl(struct sock *sk, int val)
{
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
	wrlen = roundup(wrlen, 16);

	skb = alloc_skb(wrlen, GFP_ATOMIC);
	if (!skb)
		return;

	csk = rcu_dereference_sk_user_data(sk);

	__set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
	ret = cxgb4_ofld_send(csk->egress_dev, skb);
	if (ret < 0)
		kfree_skb(skb);
}

/* TLS Key bitmap processing */
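/*
 * Size a bitmap with one bit per TLS key context that fits in the adapter's
 * key memory region and allocate it; each set bit marks a key id in use.
 */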
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
	unsigned int num_key_ctx, bsize;
	int ksize;

	num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
	bsize = BITS_TO_LONGS(num_key_ctx);

	cdev->kmap.size = num_key_ctx;
	cdev->kmap.available = bsize;
	ksize = sizeof(*cdev->kmap.addr) * bsize;
	cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
	if (!cdev->kmap.addr)
		return -ENOMEM;

	cdev->kmap.start = lldi->vr->key.start;
	spin_lock_init(&cdev->kmap.lock);
	return 0;
}

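/*
 * Claim the first free key-context slot under the kmap lock, record it as
 * the connection's RX or TX key depending on @optname and bump the TLS key
 * counter.  Returns -1 when the bitmap is exhausted.
 */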
static int get_new_keyid(struct chtls_sock *csk, u32 optname)
{
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;
	int keyid;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
	if (keyid < cdev->kmap.size) {
		__set_bit(keyid, cdev->kmap.addr);
		if (optname == TLS_RX)
			hws->rxkey = keyid;
		else
			hws->txkey = keyid;
		atomic_inc(&adap->chcr_stats.tls_key);
	} else {
		keyid = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
	return keyid;
}

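/* Release any RX/TX key ids held by the connection back to the bitmap. */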
void free_tls_keyid(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;

	if (!cdev->kmap.addr)
		return;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	if (hws->rxkey >= 0) {
		__clear_bit(hws->rxkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->rxkey = -1;
	}
	if (hws->txkey >= 0) {
		__clear_bit(hws->txkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->txkey = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
}

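/*
 * Convert a key id to the hardware address of its key context: a byte
 * offset from the start of the key memory region, expressed in 32-byte
 * units (hence the shift by 5).
 */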
unsigned int keyid_to_addr(int start_addr, int keyid)
{
	return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
}

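/*
 * Fill the iv_to_auth word of an RX key context with the fixed IV, AAD,
 * cipher and authentication offsets used for AES-GCM TLS records.
 */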
static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
{
	kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
				  KEYCTX_TX_WR_AAD_V(1ULL) |
				  KEYCTX_TX_WR_AADST_V(5ULL) |
				  KEYCTX_TX_WR_CIPHER_V(14ULL) |
				  KEYCTX_TX_WR_CIPHERST_V(0ULL) |
				  KEYCTX_TX_WR_AUTH_V(14ULL) |
				  KEYCTX_TX_WR_AUTHST_V(16ULL) |
				  KEYCTX_TX_WR_AUTHIN_V(16ULL));
}

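/*
 * Build the hardware key context: copy key and salt out of the socket's
 * crypto_info for AES-GCM-128/256, derive the GHASH subkey
 * H = CIPH_K(0^128), lay out header, salt, key and H, and wipe the key
 * material from driver memory afterwards.
 */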
static int chtls_key_info(struct chtls_sock *csk,
			  struct _key_ctx *kctx,
			  u32 keylen, u32 optname,
			  int cipher_type)
{
	unsigned char key[AES_MAX_KEY_SIZE];
	unsigned char *key_p, *salt;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
	struct crypto_aes_ctx aes;
	int ret;

	key_ctx_size = sizeof(struct _key_ctx) +
		       roundup(keylen, 16) + AEAD_H_SIZE;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * prepare the key context based on the GCM cipher type
	 */
	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
			(struct tls12_crypto_info_aes_gcm_128 *)
					&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_128->key, keylen);

		key_p            = gcm_ctx_128->key;
		salt             = gcm_ctx_128->salt;
		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		salt_size        = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
			(struct tls12_crypto_info_aes_gcm_256 *)
					&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_256->key, keylen);

		key_p            = gcm_ctx_256->key;
		salt             = gcm_ctx_256->salt;
		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		salt_size        = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		break;
	}
	default:
		pr_err("GCM: Invalid cipher type %d\n", cipher_type);
		return -EINVAL;
	}

	/* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
	 * key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));
	csk->tlshws.keylen = key_ctx_size;

	/* Copy the key context */
	if (optname == TLS_RX) {
		int key_ctx;

		key_ctx = ((key_ctx_size >> 4) << 3);
		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx);
		chtls_rxkey_ivauth(kctx);
	} else {
		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx_size >> 4);
	}

	memcpy(kctx->salt, salt, salt_size);
	memcpy(kctx->key, key_p, keylen);
	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
	/* erase key info from driver */
	memset(key_p, 0, keylen);

	return 0;
}

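/*
 * Precompute the SCMD control words sent with TLS records: the cipher and
 * auth mode values (2 and 4, which select AES-GCM and GHASH in the chcr
 * SCMD encoding), IV generation and TLS fragmentation enable.
 */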
static void chtls_set_scmd(struct chtls_sock *csk)
{
	struct chtls_hws *hws = &csk->tlshws;

	hws->scmd.seqno_numivs =
		SCMD_SEQ_NO_CTRL_V(3) |
		SCMD_PROTO_VERSION_V(0) |
		SCMD_ENC_DEC_CTRL_V(0) |
		SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
		SCMD_CIPH_MODE_V(2) |
		SCMD_AUTH_MODE_V(4) |
		SCMD_HMAC_CTRL_V(0) |
		SCMD_IV_SIZE_V(4) |
		SCMD_NUM_IVS_V(1);

	hws->scmd.ivgen_hdrlen =
		SCMD_IV_GEN_CTRL_V(1) |
		SCMD_KEY_CTX_INLINE_V(0) |
		SCMD_TLS_FRAG_ENABLE_V(1);
}

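/*
 * Program a new TX or RX key: flush pending TX data, reserve a key id,
 * write the key context into on-chip key memory with a ULP_TX_MEM_WRITE
 * work request, and for RX keys update the TCB (key id, ULP raw flags,
 * sequence number) before lifting RX quiesce.
 */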
int chtls_setkey(struct chtls_sock *csk, u32 keylen,
		 u32 optname, int cipher_type)
{
	struct tls_key_req *kwr;
	struct chtls_dev *cdev;
	struct _key_ctx *kctx;
	int wrlen, klen, len;
	struct sk_buff *skb;
	struct sock *sk;
	int keyid;
	int kaddr;
	int ret;

	cdev = csk->cdev;
	sk = csk->sk;

	klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
	wrlen = roundup(sizeof(*kwr), 16);
	len = klen + wrlen;

	/* Flush outstanding data before the new key takes effect */
	if (optname == TLS_TX) {
		lock_sock(sk);
		if (skb_queue_len(&csk->txq))
			chtls_push_frames(csk, 0);
		release_sock(sk);
	}

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	keyid = get_new_keyid(csk, optname);
	if (keyid < 0) {
		ret = -ENOSPC;
		goto out_nokey;
	}

	kaddr = keyid_to_addr(cdev->kmap.start, keyid);
	kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
	kwr->wr.op_to_compl =
		cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
		      FW_WR_ATOMIC_V(1U));
	kwr->wr.flowid_len16 =
		cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16) |
			    FW_WR_FLOWID_V(csk->tid)));
	kwr->wr.protocol = 0;
	kwr->wr.mfs = htons(TLS_MFS);
	kwr->wr.reneg_to_write_rx = optname;

	/* ulptx command */
	kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			    T5_ULP_MEMIO_ORDER_V(1) |
			    T5_ULP_MEMIO_IMM_V(1));
	kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
			      DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
	kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

	/* sub command */
	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
	kwr->sc_imm.len = cpu_to_be32(klen);

	lock_sock(sk);
	/* key info */
	kctx = (struct _key_ctx *)(kwr + 1);
	ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
	if (ret)
		goto out_notcb;

	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
		goto out_notcb;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
	csk->wr_credits -= DIV_ROUND_UP(len, 16);
	csk->wr_unacked += DIV_ROUND_UP(len, 16);
	enqueue_wr(csk, skb);
	cxgb4_ofld_send(csk->egress_dev, skb);
	skb = NULL;

	chtls_set_scmd(csk);
	/* Clear quiesce for Rx key */
	if (optname == TLS_RX) {
		ret = chtls_set_tcb_keyid(sk, keyid);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_field(sk, 0,
					  TCB_ULP_RAW_V(TCB_ULP_RAW_M),
					  TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
					  TF_TLS_CONTROL_V(1) |
					  TF_TLS_ACTIVE_V(1) |
					  TF_TLS_ENABLE_V(1))));
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_seqno(sk);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_quiesce(sk, 0);
		if (ret)
			goto out_notcb;
		csk->tlshws.rxkey = keyid;
	} else {
		csk->tlshws.tx_seq_no = 0;
		csk->tlshws.txkey = keyid;
	}

	release_sock(sk);
	return ret;
out_notcb:
	release_sock(sk);
	free_tls_keyid(sk);
out_nokey:
	kfree_skb(skb);
	return ret;
}