cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

selq.c (5981B)


      1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
      2/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
      3
      4#include "selq.h"
      5#include <linux/slab.h>
      6#include <linux/netdevice.h>
      7#include <linux/rcupdate.h>
      8#include "en.h"
      9#include "en/ptp.h"
     10
/* Immutable snapshot of the parameters mlx5e_select_queue() needs.
 * Published to the datapath via RCU (selq->active); writers prepare a
 * standby copy and atomically swap it in (see mlx5e_selq_apply()).
 */
struct mlx5e_selq_params {
	unsigned int num_regular_queues; /* num_channels * num_tcs */
	unsigned int num_channels;
	unsigned int num_tcs;
	union {
		/* Aliases both flag bits below, so the fast path can test
		 * "any special queue enabled?" with a single byte compare.
		 */
		u8 is_special_queues;
		struct {
			bool is_htb : 1;
			bool is_ptp : 1;
		};
	};
};
     23
     24int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
     25{
     26	struct mlx5e_selq_params *init_params;
     27
     28	selq->state_lock = state_lock;
     29
     30	selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
     31	if (!selq->standby)
     32		return -ENOMEM;
     33
     34	init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
     35	if (!init_params) {
     36		kvfree(selq->standby);
     37		selq->standby = NULL;
     38		return -ENOMEM;
     39	}
     40	/* Assign dummy values, so that mlx5e_select_queue won't crash. */
     41	*init_params = (struct mlx5e_selq_params) {
     42		.num_regular_queues = 1,
     43		.num_channels = 1,
     44		.num_tcs = 1,
     45		.is_htb = false,
     46		.is_ptp = false,
     47	};
     48	rcu_assign_pointer(selq->active, init_params);
     49
     50	return 0;
     51}
     52
/* Tear down the selector: unpublish the active parameters and free both
 * buffers. Relies on a trick: freeing standby first and then calling
 * mlx5e_selq_apply() swaps a NULL into selq->active, which
 * mlx5e_select_queue() explicitly tolerates (it returns queue 0).
 */
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
	WARN_ON_ONCE(selq->is_prepared);

	/* Free standby and leave it NULL so the apply below publishes
	 * NULL as the active pointer.
	 */
	kvfree(selq->standby);
	selq->standby = NULL;
	/* Satisfy mlx5e_selq_apply()'s WARN_ON_ONCE(!is_prepared). */
	selq->is_prepared = true;

	/* active <- NULL; after synchronize_net(), standby holds the
	 * previously active params with no datapath readers left.
	 */
	mlx5e_selq_apply(selq);

	kvfree(selq->standby);
	selq->standby = NULL;

	/* NOTE(review): mlx5e_selq_apply() passes
	 * lockdep_is_held(selq->state_lock) to rcu_replace_pointer(), but no
	 * lock is taken here — confirm callers hold state_lock or that no
	 * concurrent prepare/apply can race with cleanup.
	 */
}
     66
     67void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
     68{
     69	lockdep_assert_held(selq->state_lock);
     70	WARN_ON_ONCE(selq->is_prepared);
     71
     72	selq->is_prepared = true;
     73
     74	selq->standby->num_channels = params->num_channels;
     75	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
     76	selq->standby->num_regular_queues =
     77		selq->standby->num_channels * selq->standby->num_tcs;
     78	selq->standby->is_htb = htb;
     79	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
     80}
     81
/* Atomically publish the prepared standby parameters to the datapath.
 * After this returns, the old active set is held in selq->standby for
 * reuse by the next mlx5e_selq_prepare() (or freeing by cleanup).
 */
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
	struct mlx5e_selq_params *old_params;

	WARN_ON_ONCE(!selq->is_prepared);

	selq->is_prepared = false;

	/* RCU swap: readers in mlx5e_select_queue() see either the old or
	 * the new params, never a torn mix.
	 */
	old_params = rcu_replace_pointer(selq->active, selq->standby,
					 lockdep_is_held(selq->state_lock));
	synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
	/* No readers can still hold old_params now; recycle it as standby. */
	selq->standby = old_params;
}
     95
     96void mlx5e_selq_cancel(struct mlx5e_selq *selq)
     97{
     98	lockdep_assert_held(selq->state_lock);
     99	WARN_ON_ONCE(!selq->is_prepared);
    100
    101	selq->is_prepared = false;
    102}
    103
    104#ifdef CONFIG_MLX5_CORE_EN_DCB
    105static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
    106{
    107	int dscp_cp = 0;
    108
    109	if (skb->protocol == htons(ETH_P_IP))
    110		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
    111	else if (skb->protocol == htons(ETH_P_IPV6))
    112		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
    113
    114	return priv->dcbx_dp.dscp2prio[dscp_cp];
    115}
    116#endif
    117
    118static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
    119{
    120#ifdef CONFIG_MLX5_CORE_EN_DCB
    121	if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
    122		return mlx5e_get_dscp_up(priv, skb);
    123#endif
    124	if (skb_vlan_tag_present(skb))
    125		return skb_vlan_tag_get_prio(skb);
    126	return 0;
    127}
    128
    129static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
    130			      struct mlx5e_selq_params *selq)
    131{
    132	struct mlx5e_priv *priv = netdev_priv(dev);
    133	int up;
    134
    135	up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
    136
    137	return selq->num_regular_queues + up;
    138}
    139
/* Resolve an HTB txq for @skb from its priority classid, falling back to
 * the default class. Returns 0 when no HTB queue applies (caller then
 * picks a regular queue), or a positive txq index.
 */
static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	u16 classid;

	/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
	if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	/* classid 0 means "no HTB class": not classified and no default. */
	if (!classid)
		return 0;

	return mlx5e_get_txq_by_classid(priv, classid);
}
    155
/* ndo_select_queue implementation: map @skb to a txq index using the
 * RCU-published selq params. Three regimes, checked in order of expected
 * likelihood: no special queues (fast path), HTB enabled, PTP enabled.
 * Runs in the xmit hot path under rcu_read_lock_bh().
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_selq_params *selq;
	int txq_ix, up;

	selq = rcu_dereference_bh(priv->selq.active);

	/* This is a workaround needed only for the mlx5e_netdev_change_profile
	 * flow that zeroes out the whole priv without unregistering the netdev
	 * and without preventing ndo_select_queue from being called.
	 */
	if (unlikely(!selq))
		return 0;

	/* Single-byte test covering both is_htb and is_ptp (union alias). */
	if (likely(!selq->is_special_queues)) {
		/* No special queues, netdev_pick_tx returns one of the regular ones. */

		txq_ix = netdev_pick_tx(dev, skb, NULL);

		if (selq->num_tcs <= 1)
			return txq_ix;

		up = mlx5e_get_up(priv, skb);

		/* Normalize any picked txq_ix to [0, num_channels),
		 * So we can return a txq_ix that matches the channel and
		 * packet UP.
		 */
		return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
			up * selq->num_channels;
	}

	if (unlikely(selq->is_htb)) {
		/* With HTB enabled num_tcs == 1, so the PTP SQ (if any) sits
		 * right after the regular queues at index num_channels.
		 */

		txq_ix = mlx5e_select_htb_queue(priv, skb);
		if (txq_ix > 0)
			return txq_ix;

		if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
			return selq->num_channels;

		txq_ix = netdev_pick_tx(dev, skb, NULL);

		/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
		 * If they are selected, switch to regular queues.
		 * Driver to select these queues only at mlx5e_select_ptpsq()
		 * and mlx5e_select_htb_queue().
		 */
		return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
	}

	/* PTP is enabled */

	if (mlx5e_use_ptpsq(skb))
		return mlx5e_select_ptpsq(dev, skb, selq);

	txq_ix = netdev_pick_tx(dev, skb, NULL);

	/* Normalize any picked txq_ix to [0, num_channels). Queues in range
	 * [0, num_regular_queues) will be mapped to the corresponding channel
	 * index, so that we can apply the packet's UP (if num_tcs > 1).
	 * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
	 * because driver should select the PTP only at mlx5e_select_ptpsq().
	 */
	txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);

	if (selq->num_tcs <= 1)
		return txq_ix;

	up = mlx5e_get_up(priv, skb);

	return txq_ix + up * selq->num_channels;
}