cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

en_main.c (11383B)


/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_en.h"

MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_en_version[] =
	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
	DRV_VERSION "\n";

#define MLX4_EN_PARM_INT(X, def_val, desc) \
	static unsigned int X = def_val;\
	module_param(X , uint, 0444); \
	MODULE_PARM_DESC(X, desc);


/*
 * Device scope module parameters
 */

/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
		 "Enable RSS for incoming UDP traffic or disabled (0)");

/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
			   " Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
			   " Per priority bit mask");

MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
		 "Threshold for using inline data (range: 17-104, default: 104)");

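/*
 * For illustration: MLX4_EN_PARM_INT() above is shorthand for a read-only
 * unsigned module parameter.  The udp_rss instance, for example, expands
 * to roughly:
 *
 *	static unsigned int udp_rss = 1;
 *	module_param(udp_rss, uint, 0444);
 *	MODULE_PARM_DESC(udp_rss, "Enable RSS for incoming UDP traffic or disabled (0)");
 *
 * so each parameter can be set at module load time (e.g.
 * "modprobe mlx4_en udp_rss=0") and read back under
 * /sys/module/mlx4_en/parameters/.
 */
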
#define MAX_PFC_TX     0xff
#define MAX_PFC_RX     0xff

void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...)
{
	va_list args;
	struct va_format vaf;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;
	if (priv->registered)
		printk("%s%s: %s: %pV",
		       level, DRV_NAME, priv->dev->name, &vaf);
	else
		printk("%s%s: %s: Port %d: %pV",
		       level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
		       priv->port, &vaf);
	va_end(args);
}
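
/*
 * A sketch of how en_print() is typically reached, assuming the en_err()/
 * en_warn() wrappers in mlx4_en.h, which supply a log level and forward
 * the caller's format string:
 *
 *	en_warn(priv, "port %d: link down\n", priv->port);
 *
 * The "%pV" printk extension formats the caller's struct va_format, so the
 * whole message is emitted in a single printk() call.  Before the netdev is
 * registered there is no interface name to print, hence the fallback branch
 * that prints the PCI device name and port number instead.
 */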

void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
			MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);

	/* Drop the packet if SRIOV is not enabled
	 * and not performing the selftest or flb disabled
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) &&
	    !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;

	/* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
	 * is requested
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;

	mutex_lock(&priv->mdev->state_lock);
	if ((priv->mdev->dev->caps.flags2 &
	     MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
	    priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
		int i;
		int err = 0;
		int loopback = !!(features & NETIF_F_LOOPBACK);

		for (i = 0; i < priv->rx_ring_num; i++) {
			int ret;

			ret = mlx4_en_change_mcast_lb(priv,
						      &priv->rss_map.qps[i],
						      loopback);
			if (!err)
				err = ret;
		}
		if (err)
			mlx4_warn(priv->mdev, "failed to change mcast loopback\n");
	}
	mutex_unlock(&priv->mdev->state_lock);
}
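
/*
 * Rough usage sketch (call path and interface name are illustrative, not
 * taken from this file): NETIF_F_LOOPBACK is a user-toggleable netdev
 * feature ("loopback" in ethtool), so a command along the lines of
 *
 *	ethtool -K eth1 loopback on
 *
 * is expected to reach this helper via the driver's ndo_set_features
 * callback, updating the TX control flags, the RX filtering flag and the
 * multicast loopback setting of the RSS QPs as done above.
 */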

static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->udp_rss = udp_rss;
	params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
		MLX4_EN_MIN_TX_RING_P_UP :
		min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);

	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
		params->udp_rss = 0;
	}
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = !(pfcrx || pfctx);
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = !(pfcrx || pfctx);
		params->prof[i].tx_ppp = pfctx;
		if (mlx4_low_memory_profile()) {
			params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE;
			params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE;
		} else {
			params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
			params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		}
		params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
		params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
		params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
			params->prof[i].num_up;
		params->prof[i].rss_rings = 0;
		params->prof[i].inline_thold = inline_thold;
	}
}
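
/*
 * Worked example (values are illustrative): on a machine with 16 online
 * CPUs and the normal (non-low-memory) profile, the code above yields
 *
 *	max_num_tx_rings_p_up = min(16, MLX4_EN_MAX_TX_RING_P_UP);
 *	tx_ring_num[TX]       = max_num_tx_rings_p_up * MLX4_EN_NUM_UP_LOW;
 *
 * i.e. the per-port TX ring count scales with the online CPU count, capped
 * by MLX4_EN_MAX_TX_RING_P_UP and multiplied by the number of user
 * priorities in use.
 */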

static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
{
	struct mlx4_en_dev *endev = ctx;

	return endev->pndev[port];
}

static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, unsigned long port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		if (!mdev->pndev[port])
			return;
		priv = netdev_priv(mdev->pndev[port]);
		/* To prevent races, we poll the link state in a separate
		  task rather than changing it here */
		priv->link_state = event;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		mlx4_err(mdev, "Internal error detected, restarting device\n");
		break;

	case MLX4_DEV_EVENT_SLAVE_INIT:
	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		break;
	default:
		if (port < 1 || port > dev->caps.num_ports ||
		    !mdev->pndev[port])
			return;
		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
			  (int) port);
	}
}

static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
	struct mlx4_en_dev *mdev = endev_ptr;
	int i;

	mutex_lock(&mdev->state_lock);
	mdev->device_up = false;
	mutex_unlock(&mdev->state_lock);

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		if (mdev->pndev[i])
			mlx4_en_destroy_netdev(mdev->pndev[i]);

	destroy_workqueue(mdev->workqueue);
	(void) mlx4_mr_free(dev, &mdev->mr);
	iounmap(mdev->uar_map);
	mlx4_uar_free(dev, &mdev->priv_uar);
	mlx4_pd_free(dev, mdev->priv_pdn);
	if (mdev->nb.notifier_call)
		unregister_netdevice_notifier(&mdev->nb);
	kfree(mdev);
}

static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
{
	int i;
	struct mlx4_en_dev *mdev = ctx;

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}

	/* register notifier */
	mdev->nb.notifier_call = mlx4_en_netdev_event;
	if (register_netdevice_notifier(&mdev->nb)) {
		mdev->nb.notifier_call = NULL;
		mlx4_err(mdev, "Failed to create notifier\n");
	}
}

static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		goto err_free_res;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &dev->persist->pdev->dev;
	mdev->pdev = dev->persist->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	mlx4_en_get_profile(mdev);

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Set default number of RX rings*/
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 *       by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue)
		goto err_mr;

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	mutex_init(&mdev->state_lock);
	mdev->device_up = true;

	return mdev;

err_mr:
	(void) mlx4_mr_free(dev, &mdev->mr);
err_map:
	if (mdev->uar_map)
		iounmap(mdev->uar_map);
err_uar:
	mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
	kfree(mdev);
err_free_res:
	return NULL;
}

static struct mlx4_interface mlx4_en_interface = {
	.add		= mlx4_en_add,
	.remove		= mlx4_en_remove,
	.event		= mlx4_en_event,
	.get_dev	= mlx4_en_get_netdev,
	.protocol	= MLX4_PROT_ETH,
	.activate	= mlx4_en_activate,
};
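
/*
 * Lifecycle sketch (based on the mlx4 core interface contract, not on code
 * in this file): mlx4_register_interface() below hands this table to the
 * mlx4 core, which is expected to call .add for each ConnectX device that
 * is (or later becomes) present, .activate once a device is fully set up,
 * .event for asynchronous events such as port up/down, and .remove on
 * device teardown or when the interface is unregistered.
 */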

static void mlx4_en_verify_params(void)
{
	if (pfctx > MAX_PFC_TX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfctx, MAX_PFC_TX);
		pfctx = 0;
	}

	if (pfcrx > MAX_PFC_RX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfcrx, MAX_PFC_RX);
		pfcrx = 0;
	}

	if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
		pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
			inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
		inline_thold = MAX_INLINE;
	}
}

static int __init mlx4_en_init(void)
{
	mlx4_en_verify_params();
	mlx4_en_init_ptys2ethtool_map();

	return mlx4_register_interface(&mlx4_en_interface);
}

static void __exit mlx4_en_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_en_interface);
}

module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);