cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cm.c (16784B)


/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

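/*
 * CM (Communication Manager) MAD paravirtualization for mlx4 SR-IOV:
 * the PF rewrites the communication IDs carried in CM MADs so that
 * each slave (VF) keeps its own local comm ID space while a single
 * paravirtualized ID space (pv_cm_id) is used on the wire.
 */
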
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

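/*
 * One entry per paravirtualized connection: maps a slave-local CM ID
 * (slave_id, sl_cm_id) to the pv_cm_id used on the wire. Each entry
 * lives in the sl_id_map rbtree (keyed by sl_cm_id, then slave_id),
 * in the pv_id_table xarray (keyed by pv_cm_id), and on the
 * per-device cm_list.
 */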
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

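/*
 * Records which slave a REQ came from, keyed by the requester's
 * comm ID (rem_pv_cm_id), so that a later REJ with reason
 * IB_CM_REJ_TIMEOUT can be routed back to the correct slave.
 * Entries expire via delayed work after CM_CLEANUP_CACHE_TIMEOUT.
 */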
struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

static struct workqueue_struct *cm_wq;

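/*
 * Accessors for the comm ID fields of a CM MAD. SIDR REQ/REP carry a
 * single request_id instead of local/remote comm IDs, so which field
 * is used depends on the attribute ID; combinations that do not exist
 * on the wire are flagged as errors.
 */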
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

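/*
 * Delayed-work handler: once the cleanup timeout expires, drop the
 * entry from the pv_id_table xarray and the sl_id_map rbtree, unlink
 * it from cm_list and free it.
 */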
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

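/*
 * Insert @new into the sl_id_map rbtree, ordered by (sl_cm_id,
 * slave_id). If an entry with the same key already exists, its tree
 * node is replaced in place instead of inserting a duplicate.
 */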
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

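/*
 * Create a new mapping for (slave_id, sl_cm_id): a pv_cm_id is
 * assigned cyclically from the pv_id_table xarray and the entry is
 * added to the rbtree and the per-device cm_list.
 */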
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

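/*
 * Look up a mapping either by pv_cm_id (if *pv_cm_id != -1) or by
 * (slave_id, sl_cm_id); in the latter case *pv_cm_id is filled in on
 * success.
 */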
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

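/*
 * Arm (or re-arm) delayed deletion of a mapping when the connection
 * is being torn down, unless the device is already going down.
 */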
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* make sure that there is no schedule inside the scheduled work */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

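/*
 * Paravirtualize a CM MAD sent by a slave: replace the slave-local
 * comm ID with the pv_cm_id before the MAD goes out on the wire.
 * For REQ, REP, MRA, SIDR REQ and timeout REJs a mapping is created
 * if none exists yet; a DREQ additionally arms the delayed cleanup.
 */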
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				__func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

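/*
 * Delayed-work handler for a rej_tmout_entry: remove it from the
 * xarray (unless it was concurrently replaced) and free it.
 */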
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

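/*
 * Remember which slave sent the REQ carrying remote comm ID
 * @rem_pv_cm_id. If an entry already exists (a retried REQ), its
 * timeout is simply re-armed.
 */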
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug(
			"Non-null old entry (%p) or error (%d) when inserting\n",
			old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

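/*
 * Return the slave that sent the REQ with remote comm ID
 * @rem_pv_cm_id, or a negative errno if no entry is found.
 */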
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

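/*
 * Demultiplex a CM MAD arriving from the wire: resolve the
 * destination slave (by GID for REQ/SIDR REQ, by pv_cm_id otherwise,
 * with timeout REJs falling back to the rej_tmout table) and restore
 * the slave-local comm ID in the MAD. DREQ and REJ arm the delayed
 * cleanup of the mapping.
 */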
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

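/* Per-device initialization of the CM paravirtualization state. */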
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

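/*
 * Flush all rej_tmout entries belonging to @slave (all slaves if
 * slave < 0) by forcing their delayed work to run immediately.
 */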
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(cm_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_workqueue(cm_wq);
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Needed for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_workqueue(cm_wq); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* move any remaining entries for this slave off cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}

int mlx4_ib_cm_init(void)
{
	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
	if (!cm_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_cm_destroy(void)
{
	destroy_workqueue(cm_wq);
}