cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cq.c (12866B)


// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: return true on success, else return
 * false if cq is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct ib_uverbs_wc *uqueue = NULL;
	struct ib_wc *kqueue = NULL;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	unsigned long flags;
	u32 head;
	u32 next;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->ip) {
		u_wc = cq->queue;
		uqueue = &u_wc->uqueue[0];
		head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
	} else {
		k_wc = cq->kqueue;
		kqueue = &k_wc->kqueue[0];
		head = k_wc->head;
		tail = k_wc->tail;
	}

	/*
	 * Note that the head pointer might be writable by
	 * user processes. Take care to verify it is a sane value.
	 */
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

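	/*
	 * The ring is full when advancing head would land on tail; one
	 * slot always stays unused so a full ring is distinct from an
	 * empty one.
	 */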
	if (unlikely(next == tail || cq->cq_full)) {
		struct rvt_dev_info *rdi = cq->rdi;

		if (!cq->cq_full)
			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
		cq->cq_full = true;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return false;
	}
	trace_rvt_cq_enter(cq, entry, head);
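	/*
	 * User-mapped queue: copy the fields individually into the uverbs
	 * layout. entry->qp is a kernel pointer, so only qp_num is exposed
	 * to user space.
	 */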
	if (uqueue) {
		uqueue[head].wr_id = entry->wr_id;
		uqueue[head].status = entry->status;
		uqueue[head].opcode = entry->opcode;
		uqueue[head].vendor_err = entry->vendor_err;
		uqueue[head].byte_len = entry->byte_len;
		uqueue[head].ex.imm_data = entry->ex.imm_data;
		uqueue[head].qp_num = entry->qp->qp_num;
		uqueue[head].src_qp = entry->src_qp;
		uqueue[head].wc_flags = entry->wc_flags;
		uqueue[head].pkey_index = entry->pkey_index;
		uqueue[head].slid = ib_lid_cpu16(entry->slid);
		uqueue[head].sl = entry->sl;
		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
	} else {
		kqueue[head] = *entry;
		k_wc->head = next;
	}

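	/*
	 * Wake the completion handler if the consumer armed notification:
	 * always for IB_CQ_NEXT_COMP, and for IB_CQ_SOLICITED only on
	 * solicited or error completions.
	 */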
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
	return true;
}
EXPORT_SYMBOL(rvt_cq_enter);

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

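		/*
		 * If another completion arrived while the handler ran,
		 * triggered has changed; loop and call the handler again.
		 */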
		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;
	int err;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return -EINVAL;

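	/* Clamp the requested completion vector into the device's range. */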
	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
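	/* One extra slot is allocated so a full ring differs from an empty one. */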
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (entries + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
		if (IS_ERR(cq->ip)) {
			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err)
			goto bail_ip;
	}

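	/* Enforce the per-device CQ limit under n_cqs_lock. */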
	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		err = -ENOMEM;
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	if (u_wc)
		cq->queue = u_wc;
	else
		cq->kqueue = k_wc;

	trace_rvt_create_cq(cq, attr);
	return 0;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(u_wc);
	vfree(k_wc);
	return err;
}

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->kqueue);
	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		if (cq->queue) {
			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
				RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
				ret = 1;
		} else {
			if (cq->kqueue->head != cq->kqueue->tail)
				ret = 1;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of completion queue entries
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_cq_wc *old_u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	struct rvt_k_cq_wc *old_k_wc = NULL;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (cqe + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}
	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	if (u_wc) {
		old_u_wc = cq->queue;
		head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
	} else {
		old_k_wc = cq->kqueue;
		head = old_k_wc->head;
		tail = old_k_wc->tail;
	}

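	/*
	 * Clamp the indices into range, then count the occupied entries;
	 * the ring has cq->ibcq.cqe + 1 slots, so the count wraps modulo
	 * cqe + 1.
	 */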
	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
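	/* Copy the occupied entries into the new ring, compacted to start at 0. */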
	for (n = 0; tail != head; n++) {
		if (u_wc)
			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
		else
			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	if (u_wc) {
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
		RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
		cq->queue = u_wc;
	} else {
		k_wc->head = n;
		k_wc->tail = 0;
		cq->kqueue = k_wc;
	}
	spin_unlock_irq(&cq->lock);

	if (u_wc)
		vfree(old_u_wc);
	else
		vfree(old_k_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, u_wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(u_wc);
	vfree(k_wc);

	return ret;
}

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_k_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->kqueue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
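	/* Pop entries until the ring drains or num_entries are filled. */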
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
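	/* "%s" is the workqueue name format string; "rdmavt_cq" fills it in. */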
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}