cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mpesw.c (2315B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "lib/mlx5.h"

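/* Deferred teardown: disable the LAG configuration under the lag mutex. */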
void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);

	mutex_lock(&ldev->lock);
	mlx5_disable_lag(ldev);
	mutex_unlock(&ldev->lock);
}

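/* Defer MPESW teardown to the lag workqueue; warn if the work could not be queued. */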
static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;

	if (!queue_work(ldev->wq, &ldev->mpesw_work))
		mlx5_core_warn(dev, "failed to queue work\n");
}

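/* Drop one rule reference; when the count hits zero in MPESW mode, schedule LAG teardown. */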
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;

	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
	    ldev->mode == MLX5_LAG_MODE_MPESW)
		mlx5_lag_disable_mpesw(dev);
	mutex_unlock(&ldev->lock);
}

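/* Take a rule reference; the first rule activates LAG in MPESW mode. Fails with
 * -EINVAL if another LAG mode is already active.
 */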
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;
	bool shared_fdb;
	int err = 0;

	if (!ldev)
		return 0;

	mutex_lock(&ldev->lock);
	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
		goto out;

	if (ldev->mode != MLX5_LAG_MODE_NONE) {
		err = -EINVAL;
		goto out;
	}
	shared_fdb = mlx5_shared_fdb_supported(ldev);
	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb);
	if (err)
		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);

out:
	mutex_unlock(&ldev->lock);
	return err;
}

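/* Mirred offload to a bond master is not supported while LAG runs in MPESW mode. */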
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
	struct mlx5_lag *ldev = mdev->priv.lag;

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	mutex_lock(&ldev->lock);
	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
		mutex_unlock(&ldev->lock);
		return -EOPNOTSUPP;
	}
	mutex_unlock(&ldev->lock);
	return 0;
}

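/* Report whether the device is currently running LAG in MPESW mode. */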
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
{
	bool ret;

	ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
	return ret;
}

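/* Initialize the MPESW work item and zero the rule reference count. */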
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
	INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}

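/* Make sure any pending bond work has finished before the lag object goes away. */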
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
	cancel_delayed_work_sync(&ldev->bond_work);
}