cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ccm.c (4932B)


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */

#include <linux/bitops.h>

#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net.h"

#define ccm_warn(app, msg...)	nn_dp_warn(&(app)->ctrl->dp, msg)

#define NFP_CCM_TAG_ALLOC_SPAN	(U16_MAX / 4)

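/* Tags form a sliding window over the 16-bit tag space: tag_alloc_next is
 * the next tag to hand out, tag_alloc_last the oldest tag still in flight.
 * Allocation is refused once more than a quarter of the space is in use,
 * so a tag is not recycled too soon after a timed-out request.
 */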
static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
{
	u16 used_tags;

	used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;

	return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
}

static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
	/* CCM is for FW communication which is request-reply.  To make sure
	 * we don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
		ccm_warn(ccm->app, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
	return ccm->tag_alloc_next++;
}

static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
	       ccm->tag_alloc_last != ccm->tag_alloc_next)
		ccm->tag_alloc_last++;
}

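/* Find and unlink the queued reply carrying @tag, releasing the tag on a
 * match.  Caller must hold the control channel lock.
 */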
static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&ccm->replies, skb) {
		msg_tag = nfp_ccm_get_tag(skb);
		if (msg_tag == tag) {
			nfp_ccm_free_tag(ccm, tag);
			__skb_unlink(skb, &ccm->replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *
nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

static struct sk_buff *
nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	if (!skb)
		nfp_ccm_free_tag(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

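/* Wait for the reply to the request with @tag: busy-poll briefly for fast
 * replies, then sleep on the wait queue for up to 5 seconds.  On timeout
 * the tag is dropped so it can eventually be reused.
 */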
static struct sk_buff *
nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
		   enum nfp_ccm_type type, int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_ccm_reply(ccm, app, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(ccm->wq,
					       skb = nfp_ccm_reply(ccm, app,
								   tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
	if (err < 0) {
		ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
			 err == -ERESTARTSYS ? "interrupted" : "error",
			 type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

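/* Send a request to the FW and wait for the matching reply.  The CCM header
 * at the start of @skb is filled in here; @skb is consumed and the reply
 * (or an ERR_PTR()) is returned.  A @reply_size of 0 skips the length check
 * and leaves reply validation to the caller.
 */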
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

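/* Handle a control message received from the FW: if a request is waiting on
 * its tag, queue it on the reply list and wake the waiters, otherwise drop it.
 */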
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	ccm->app = app;
	skb_queue_head_init(&ccm->replies);
	init_waitqueue_head(&ccm->wq);
	return 0;
}

void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}
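
A minimal usage sketch (not part of ccm.c) of how an NFP app component might
drive this API with a request/reply exchange.  It assumes
nfp_app_ctrl_msg_alloc() from nfp_app.h for allocating control-message skbs
and the same includes as above plus <linux/err.h>; the helper name
example_ccm_request(), the payload layout and the trimmed error handling are
illustrative only.

static int example_ccm_request(struct nfp_ccm *ccm, enum nfp_ccm_type type,
			       const void *req, unsigned int req_size,
			       unsigned int reply_size)
{
	unsigned int len = sizeof(struct nfp_ccm_hdr) + req_size;
	struct sk_buff *skb;

	/* Reserve room for the CCM header followed by the request payload
	 * (hypothetical allocation helper, see note above).
	 */
	skb = nfp_app_ctrl_msg_alloc(ccm->app, len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_put(skb, len);
	memcpy(skb->data + sizeof(struct nfp_ccm_hdr), req, req_size);

	/* Fills in the header, allocates a tag, sends the request and waits
	 * for the matching reply.  The request skb is consumed either way.
	 */
	skb = nfp_ccm_communicate(ccm, skb, type, reply_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* ... parse the reply payload here ... */

	dev_kfree_skb_any(skb);
	return 0;
}

On the receive side, the app's control-message handler is expected to hand
FW replies to nfp_ccm_rx(), which is what wakes the waiter inside
nfp_ccm_communicate().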