cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

security.c (18472B)


/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static LIST_HEAD(mad_agent_list);
/* Lock to protect mad_agent_list */
static DEFINE_SPINLOCK(mad_agent_list_lock);

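/* Look up the pkey_index_qp_list for pp's port and pkey index.
 * Returns NULL if no QP has been listed under that pkey index yet.
 */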
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
	list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
			     pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
	return pkey;
}

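/* Read the PKey value at pp's pkey index and the subnet prefix of
 * pp's port from the cache.
 */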
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}

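/* Check with the LSM that this QP, and every QP sharing its security
 * structure, is allowed to use the given PKey on the given subnet.
 */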
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP owning the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP and the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}

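/* Recheck all QPs listed under this pkey index against the current
 * cached PKey value and send any QP that fails the LSM check to the
 * error state.
 */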
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u32 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u32 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_data[port_num].pkey_list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_data[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_data[port_num].pkey_list);
		}
		spin_unlock(&dev->port_data[port_num].pkey_list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, i.e. after
	 * a destroy has failed for example.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

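/* Free the LSM security context and the port/PKey settings of a QP
 * security structure.
 */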
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & IB_QP_PORT)
		new_pps->main.port_num = qp_attr->port_num;
	else if (qp_pps)
		new_pps->main.port_num = qp_pps->main.port_num;

	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		new_pps->main.pkey_index = qp_attr->pkey_index;
	else if (qp_pps)
		new_pps->main.pkey_index = qp_pps->main.pkey_index;

	if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
	     (qp_attr_mask & IB_QP_PORT)) ||
	    (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
		new_pps->main.state = IB_PORT_PKEY_VALID;

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

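/* Create a security structure for a new handle on a shared QP and
 * check it against the real QP's current port/PKey settings.
 */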
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

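/* Unlink a shared QP handle from the real QP's shared_qp_list and
 * free its security structure.
 */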
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

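/* Allocate and initialize the security structure for a new QP. Only
 * devices with at least one IB port get a security structure.
 */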
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	unsigned int i;
	bool is_ib = false;
	int ret;

	rdma_for_each_port (dev, i) {
		is_ib = rdma_protocol_ib(dev, i);
		if (is_ib)
			break;
	}

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

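/* First phase of QP destruction: unlist the port/PKey settings and
 * mark the structure as destroying so a concurrent cache update
 * won't touch the soon-to-be-invalid qp pointer.
 */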
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

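/* The QP destroy failed: relist the port/PKey settings and recheck
 * access in case the cache changed while they were unlisted.
 */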
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

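/* Final phase of QP destruction: wait for any pending error-flow
 * work on this security structure, then free it.
 */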
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}

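/* Called when a port's PKey cache changes: revalidate every QP
 * listed under each pkey index of that port.
 */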
void ib_security_cache_change(struct ib_device *device,
			      u32 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry (pkey, &device->port_data[port_num].pkey_list,
			     pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

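/* Free all pkey_index_qp_list entries of a device that is going
 * away.
 */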
void ib_security_release_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	unsigned int i;

	rdma_for_each_port (device, i) {
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_data[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
	}
}

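/* Security-checking wrapper around the driver's modify_qp. When the
 * port, PKey index, or alternate path of a regular QP changes, the
 * new settings are listed and checked against the LSM before the
 * modify is performed; on success they replace the old settings,
 * otherwise they are unlisted and freed.
 */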
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		   "%s: QP security is not initialized for IB QP: %u\n",
		   __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->ops.modify_qp(real_qp,
						     qp_attr,
						     qp_attr_mask,
						     udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}

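/* LSM check that the given security context may use the PKey at
 * pkey_index on this port.
 */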
static int ib_security_pkey_access(struct ib_device *dev,
				   u32 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

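/* LSM policy has changed: recompute the cached smp_allowed verdict
 * for every registered SMI MAD agent.
 */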
void ib_mad_agent_security_change(void)
{
	struct ib_mad_agent *ag;

	spin_lock(&mad_agent_list_lock);
	list_for_each_entry(ag,
			    &mad_agent_list,
			    mad_agent_sec_list)
		WRITE_ONCE(ag->smp_allowed,
			   !security_ib_endport_manage_subnet(ag->security,
				dev_name(&ag->device->dev), ag->port_num));
	spin_unlock(&mad_agent_list_lock);
}

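/* Allocate LSM state for a new MAD agent. SMI agents are also
 * checked for subnet-management permission and put on
 * mad_agent_list so later policy changes can update them.
 */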
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	INIT_LIST_HEAD(&agent->mad_agent_sec_list);

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	spin_lock(&mad_agent_list_lock);
	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		goto free_security;

	WRITE_ONCE(agent->smp_allowed, true);
	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
	spin_unlock(&mad_agent_list_lock);
	return 0;

free_security:
	spin_unlock(&mad_agent_list_lock);
	security_ib_free_security(agent->security);
	return ret;
}

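/* Undo ib_mad_agent_security_setup() when a MAD agent is freed. */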
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	if (agent->qp->qp_type == IB_QPT_SMI) {
		spin_lock(&mad_agent_list_lock);
		list_del(&agent->mad_agent_sec_list);
		spin_unlock(&mad_agent_list_lock);
	}

	security_ib_free_security(agent->security);
}

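/* Per-MAD security check: SMPs use the cached smp_allowed verdict,
 * all other MADs get a PKey access check.
 */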
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!READ_ONCE(map->agent.smp_allowed))
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}