cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ib_rep.c (5789B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"

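/*
 * Attach an eswitch representor to the already-registered uplink IB
 * device: record the rep in the matching port slot and publish the
 * rep's netdev under the RoCE netdev lock.
 */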
static int
mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
		      struct mlx5_eswitch_rep *rep,
		      int vport_index)
{
	struct mlx5_ib_dev *ibdev;

	ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
	if (!ibdev)
		return -EINVAL;

	ibdev->port[vport_index].rep = rep;
	rep->rep_data[REP_IB].priv = ibdev;
	write_lock(&ibdev->port[vport_index].roce.netdev_lock);
	ibdev->port[vport_index].roce.netdev =
		mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
	write_unlock(&ibdev->port[vport_index].roce.netdev_lock);

	return 0;
}

static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);

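/*
 * Eswitch rep load callback. The uplink rep allocates and registers the
 * IB device itself; every other vport rep only attaches to the uplink's
 * device via mlx5_ib_set_vport_rep().
 */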
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	u32 num_ports = mlx5_eswitch_get_total_vports(dev);
	const struct mlx5_ib_profile *profile;
	struct mlx5_core_dev *peer_dev;
	struct mlx5_ib_dev *ibdev;
	u32 peer_num_ports;
	int vport_index;
	int ret;

	vport_index = rep->vport_index;

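	/*
	 * With a shared FDB the master device exposes the ports of both
	 * devices: the slave's reps are folded into the master's port
	 * array at an offset of the peer's total vport count.
	 */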
	if (mlx5_lag_is_shared_fdb(dev)) {
		peer_dev = mlx5_lag_get_peer_mdev(dev);
		peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
		if (mlx5_lag_is_master(dev)) {
			/* Only 1 ib port is the representor for both uplinks */
			num_ports += peer_num_ports - 1;
		} else {
			if (rep->vport == MLX5_VPORT_UPLINK)
				return 0;
			vport_index += peer_num_ports;
			dev = peer_dev;
		}
	}

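	/* Only the uplink rep instantiates a full IB device. */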
	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &raw_eth_profile;
	else
		return mlx5_ib_set_vport_rep(dev, rep, vport_index);

	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!ibdev)
		return -ENOMEM;

	ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
			      GFP_KERNEL);
	if (!ibdev->port) {
		ret = -ENOMEM;
		goto fail_port;
	}

	ibdev->is_rep = true;
	vport_index = rep->vport_index;
	ibdev->port[vport_index].rep = rep;
	ibdev->port[vport_index].roce.netdev =
		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
	ibdev->mdev = dev;
	ibdev->num_ports = num_ports;

	ret = __mlx5_ib_add(ibdev, profile);
	if (ret)
		goto fail_add;

	rep->rep_data[REP_IB].priv = ibdev;
	if (mlx5_lag_is_shared_fdb(dev))
		mlx5_ib_register_peer_vport_reps(dev);

	return 0;

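/* Error unwind: release resources in reverse order of acquisition. */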
fail_add:
	kfree(ibdev->port);
fail_port:
	ib_dealloc_device(&ibdev->ib_dev);
	return ret;
}

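/* Return the mlx5_ib_dev stashed in the rep's REP_IB private data. */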
static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_data[REP_IB].priv;
}

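/*
 * Eswitch rep unload callback: detach the rep from its port slot and,
 * for the uplink rep, unregister and free the IB device created in
 * mlx5_ib_vport_rep_load().
 */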
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
	int vport_index = rep->vport_index;
	struct mlx5_ib_port *port;

	if (WARN_ON(!mdev))
		return;

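	/* Mirror the shared-FDB index adjustment done at load time. */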
	if (mlx5_lag_is_shared_fdb(mdev) &&
	    !mlx5_lag_is_master(mdev)) {
		struct mlx5_core_dev *peer_mdev;

		if (rep->vport == MLX5_VPORT_UPLINK)
			return;
		peer_mdev = mlx5_lag_get_peer_mdev(mdev);
		vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
	}

	if (!dev)
		return;

	port = &dev->port[vport_index];
	write_lock(&port->roce.netdev_lock);
	port->roce.netdev = NULL;
	write_unlock(&port->roce.netdev_lock);
	rep->rep_data[REP_IB].priv = NULL;
	port->rep = NULL;

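	/* The uplink rep owns the IB device, so tear it down last. */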
	if (rep->vport == MLX5_VPORT_UPLINK) {
		struct mlx5_core_dev *peer_mdev;
		struct mlx5_eswitch *esw;

		if (mlx5_lag_is_shared_fdb(mdev)) {
			peer_mdev = mlx5_lag_get_peer_mdev(mdev);
			esw = peer_mdev->priv.eswitch;
			mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
		}
		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	}
}

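/* Representor life-cycle callbacks registered with the eswitch core. */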
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5_ib_vport_rep_load,
	.unload = mlx5_ib_vport_rep_unload,
	.get_proto_dev = mlx5_ib_rep_to_dev,
};

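/*
 * In shared-FDB mode the master also registers rep_ops on the peer's
 * eswitch, so the peer's reps are loaded onto this IB device as well.
 */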
static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
	struct mlx5_eswitch *esw;

	if (!peer_mdev)
		return;

	esw = peer_mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
}

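/* Resolve the REP_ETH netdev representing @vport_num on @esw. */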
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  u16 vport_num)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
}

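/*
 * Add a flow steering rule directing traffic sent on @sq back to the
 * rep's vport. Returns NULL if the device is not a representor or
 * @port is 0, an ERR_PTR if the port has no rep, or the flow handle.
 */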
struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
						   struct mlx5_ib_sq *sq,
						   u32 port)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep;

	if (!dev->is_rep || !port)
		return NULL;

	if (!dev->port[port - 1].rep)
		return ERR_PTR(-EINVAL);

	rep = dev->port[port - 1].rep;

	return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
}

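/*
 * Auxiliary bus glue: binding to the mlx5 core's ".rdma-rep" auxiliary
 * device registers rep_ops with the eswitch; unbinding unregisters them.
 */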
static int mlx5r_rep_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = idev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
	return 0;
}

static void mlx5r_rep_remove(struct auxiliary_device *adev)
{
	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = idev->mdev;
	struct mlx5_eswitch *esw;

	esw = mdev->priv.eswitch;
	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}

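/* Match the ".rdma-rep" auxiliary device exposed by the mlx5 core. */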
static const struct auxiliary_device_id mlx5r_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".rdma-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5r_rep_id_table);

static struct auxiliary_driver mlx5r_rep_driver = {
	.name = "rep",
	.probe = mlx5r_rep_probe,
	.remove = mlx5r_rep_remove,
	.id_table = mlx5r_rep_id_table,
};

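/*
 * Registration entry points for the rep auxiliary driver, expected to
 * be called from the mlx5_ib module init/exit path.
 */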
int mlx5r_rep_init(void)
{
	return auxiliary_driver_register(&mlx5r_rep_driver);
}

void mlx5r_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5r_rep_driver);
}