cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rxe_cq.c (3458B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

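/* Validate a requested CQE count against the device maximum and, for an
 * existing CQ, against the number of entries currently in the queue.
 */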
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

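/* Completion tasklet: invoke the consumer's completion handler unless the
 * CQ has already been marked as dying.
 */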
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

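/* Allocate the completion queue ring, expose it to userspace via mmap info
 * when a user response buffer is supplied, and set up the completion
 * tasklet and lock.
 */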
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = QUEUE_TYPE_TO_CLIENT;
	cq->queue = rxe_queue_init(rxe, &cqe,
			sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	cq->is_user = uresp;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

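/* Resize the completion queue ring; on success update the advertised
 * ibcq.cqe count.
 */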
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

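/* Post a completion entry to the queue. If the queue is full, report
 * IB_EVENT_CQ_ERR to the consumer and return -EBUSY; otherwise schedule
 * the completion tasklet when a matching notification was requested.
 */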
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	int full;
	void *addr;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
	if (unlikely(full)) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
	memcpy(addr, cqe, sizeof(*cqe));

	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

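/* Mark the CQ as dying so that pending completion tasklets no longer call
 * the completion handler.
 */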
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

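/* Pool element cleanup hook: release the completion queue ring when the CQ
 * object is destroyed.
 */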
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}