cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mbox.c (9957B)


// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
#include "rvu_trace.h"

static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

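/* Clear the TX/RX headers and the per-device message/response byte
 * counters for one mailbox channel. Lockless variant; callers that
 * need serialization use otx2_mbox_reset() below, which takes
 * mdev->mbox_lock.
 */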
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);

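/* Locked wrapper around __otx2_mbox_reset(). */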
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];

	spin_lock(&mdev->mbox_lock);
	__otx2_mbox_reset(mbox, devid);
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);

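/* Free the per-device state array and drop the references to the
 * register and mailbox memory regions.
 */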
void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

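/* Common setup shared by otx2_mbox_init() and otx2_mbox_regions_init():
 * pick the TX/RX offsets, region sizes and trigger register based on
 * the mailbox direction, then allocate the per-device state array.
 */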
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
			   void *reg_base, int direction, int ndevs)
{
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size  = MBOX_UP_TX_SIZE;
		mbox->rx_size  = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size  = MBOX_UP_RX_SIZE;
		mbox->rx_size  = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;

	return 0;
}

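/* Initialize a mailbox whose per-device regions live in one contiguous
 * hwbase area, with each device's region placed MBOX_SIZE bytes apart.
 */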
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase;

	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		mdev->hwbase = mdev->mbase;
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

/* Initialize mailbox with the set of mailbox region addresses
 * in the array hwbase.
 */
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
			   struct pci_dev *pdev, void *reg_base,
			   int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase[0];

	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = hwbase[devid];
		mdev->hwbase = hwbase[devid];
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_regions_init);

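/* Sleep-wait, for up to MBOX_RSP_TIMEOUT milliseconds, until the peer
 * has acknowledged every message sent on this channel.
 */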
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

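/* Busy-poll variant of the above with a fixed one second timeout;
 * never sleeps.
 */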
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

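/* Publish the queued messages to the peer: copy them out of the bounce
 * buffer if one is in use, update the TX header, and notify the peer
 * through the trigger register.
 */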
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);

	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

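/* Reserve 'size' bytes in the TX region for a request and account
 * 'size_rsp' bytes in the RX region for its response. On success,
 * returns a pointer to the newly reserved (zeroed) message header;
 * returns NULL if either region would overflow.
 */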
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

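/* Find the response that corresponds to a previously sent request
 * 'msg'. Only valid once all responses have arrived; returns
 * ERR_PTR(-ENODEV) if the request is not found or the request and
 * response IDs do not match.
 */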
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

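/* Walk all request/response pairs and return the first non-zero
 * response code, -ENODEV if responses are still outstanding or a
 * request/response ID mismatch is found, or 0 if everything completed
 * successfully.
 */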
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

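/* Queue a stock MBOX_MSG_INVALID response for a request that could not
 * be handled.
 */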
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

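/* Return true if any messages are currently queued on this channel. */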
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

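/* Map a mailbox message ID to its name by expanding the MBOX_MESSAGES
 * macro table.
 */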
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

MODULE_AUTHOR("Marvell.");
MODULE_LICENSE("GPL v2");