cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nfp_net_repr.c (13764B)


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"

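/* Look up the representor netdev at @id in @set.  The caller must hold
 * the app lock, which is what makes the rcu_dereference_protected() safe.
 */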
struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
	return rcu_dereference_protected(set->reprs[id],
					 nfp_app_is_locked(app));
}

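/* Account a transmitted skb in the per-CPU stats.  Any dev_queue_xmit()
 * status other than NET_XMIT_SUCCESS or NET_XMIT_CN counts as a drop.
 */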
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
		      int tx_status)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	if (unlikely(tx_status != NET_XMIT_SUCCESS &&
		     tx_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

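/* Physical port stats are read straight from the MAC statistics block
 * mapped at port->eth_stats.
 */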
static void
nfp_repr_phy_port_get_stats64(struct nfp_port *port,
			      struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem = port->eth_stats;

	stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
	stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
	stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);

	stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
	stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
	stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
}

static void
nfp_repr_vnic_get_stats64(struct nfp_port *port,
			  struct rtnl_link_stats64 *stats)
{
	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the VF.
	 */
	stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
}

static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (WARN_ON(!repr->port))
		return;

	switch (repr->port->type) {
	case NFP_PORT_PHYS_PORT:
		if (!__nfp_port_get_eth_port(repr->port))
			break;
		nfp_repr_phy_port_get_stats64(repr->port, stats);
		break;
	case NFP_PORT_PF_PORT:
	case NFP_PORT_VF_PORT:
		nfp_repr_vnic_get_stats64(repr->port, stats);
		break;
	default:
		break;
	}
}

static bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

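/* Sum the per-CPU counters into @stats.  The u64_stats seqcount loop
 * retries the read if a writer updated the counters concurrently.
 */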
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}

static int
nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
			   void *stats)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return nfp_repr_get_host_stats64(dev, stats);
	}

	return -EINVAL;
}

static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	netdev->mtu = new_mtu;

	return 0;
}

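/* Transmit by attaching the repr's METADATA_HW_PORT_MUX dst (which carries
 * the control message port id) and requeuing the skb on the lower PF netdev.
 */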
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	unsigned int len = skb->len;
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return NETDEV_TX_OK;
}

static int nfp_repr_stop(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_repr_stop(repr->app, repr);
	if (err)
		return err;

	nfp_port_configure(netdev, false);
	return 0;
}

static int nfp_repr_open(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_port_configure(netdev, true);
	if (err)
		return err;

	err = nfp_app_repr_open(repr->app, repr);
	if (err)
		goto err_port_disable;

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
	return err;
}

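/* Representors transmit through the lower device, so restrict the feature
 * set to what the lower device supports, while keeping software features
 * and TC offload intact.
 */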
static netdev_features_t
nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	netdev_features_t old_features = features;
	netdev_features_t lower_features;
	struct net_device *lower_dev;

	lower_dev = repr->dst->u.port_info.lower_dev;

	lower_features = lower_dev->features;
	if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;

	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
	features |= NETIF_F_LLTX;

	return features;
}

const struct net_device_ops nfp_repr_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_repr_open,
	.ndo_stop		= nfp_repr_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_change_mtu		= nfp_repr_change_mtu,
	.ndo_get_stats64	= nfp_repr_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
	.ndo_get_phys_port_name	= nfp_port_get_phys_port_name,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_fix_features	= nfp_repr_fix_features,
	.ndo_set_features	= nfp_port_set_features,
	.ndo_set_mac_address    = eth_mac_addr,
	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
};

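/* If @lower is this repr's lower device, re-inherit its TSO limits and
 * recompute the repr's feature set.
 */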
void
nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (repr->dst->u.port_info.lower_dev != lower)
		return;

	netif_inherit_tso_max(netdev, lower);

	netdev_update_features(netdev);
}

static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	nfp_app_repr_clean(repr->app, repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}

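/* Repr transmit re-enters dev_queue_xmit() on the lower device, so give
 * repr TX queue locks their own lockdep class to avoid false lock
 * recursion reports.
 */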
static struct lock_class_key nfp_repr_netdev_xmit_lock_key;

static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}

static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}

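/* Set up a representor netdev: attach the metadata dst pointing at the PF
 * netdev, derive the feature set from the PF's repr_cap TLV, and register
 * the device.
 */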
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  u32 cmsg_port_id, struct nfp_port *port,
		  struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_net *nn = netdev_priv(pf_netdev);
	u32 repr_cap = nn->tlv_caps.repr_cap;
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = &nfp_repr_netdev_ops;
	netdev->ethtool_ops = &nfp_port_ethtool_ops;

	netdev->max_mtu = pf_netdev->max_mtu;

	/* Set features the lower device can support with representors */
	if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
		netdev->hw_features |= NETIF_F_SG;
	if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    repr_cap & NFP_NET_CFG_CTRL_LSO2)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
	}
	if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
			netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		else
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
	netdev->features |= NETIF_F_LLTX;

	if (nfp_app_has_tc(app)) {
		netdev->features |= NETIF_F_HW_TC;
		netdev->hw_features |= NETIF_F_HW_TC;
	}

	err = nfp_app_repr_init(app, netdev);
	if (err)
		goto err_clean;

	err = register_netdev(netdev);
	if (err)
		goto err_repr_clean;

	return 0;

err_repr_clean:
	nfp_app_repr_clean(app, netdev);
err_clean:
	dst_release((struct dst_entry *)repr->dst);
	return err;
}

static void __nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}

void nfp_repr_free(struct net_device *netdev)
{
	__nfp_repr_free(netdev_priv(netdev));
}

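/* Allocate a representor netdev with the requested TX/RX queue counts
 * along with its per-CPU statistics.
 */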
struct net_device *
nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
{
	struct net_device *netdev;
	struct nfp_repr *repr;

	netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
	if (!netdev)
		return NULL;

	netif_carrier_off(netdev);

	repr = netdev_priv(netdev);
	repr->netdev = netdev;
	repr->app = app;

	repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
	if (!repr->stats)
		goto err_free_netdev;

	return netdev;

err_free_netdev:
	free_netdev(netdev);
	return NULL;
}

void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	__nfp_repr_free(repr);
}

void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
	struct net_device *netdev;
	unsigned int i;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_repr_clean_and_free(netdev_priv(netdev));
	}

	kfree(reprs);
}

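/* Tear down all reprs of @type: preclean each one while it is still
 * published, unpublish the set from the app, wait out an RCU grace
 * period, then free everything.
 */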
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	int i;

	reprs = rcu_dereference_protected(app->reprs[type],
					  nfp_app_is_locked(app));
	if (!reprs)
		return;

	/* Preclean must happen before we remove the reprs reference from the
	 * app below.
	 */
	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_app_repr_preclean(app, netdev);
	}

	reprs = nfp_app_reprs_set(app, type, NULL);

	synchronize_rcu();
	nfp_reprs_clean_and_free(app, reprs);
}

struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
	struct nfp_reprs *reprs;

	reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
	if (!reprs)
		return NULL;
	reprs->num_reprs = num_reprs;

	return reprs;
}

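/* Remove and clean up physical port reprs whose port has been marked
 * NFP_PORT_INVALID (e.g. after a port reconfiguration).
 */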
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	struct nfp_repr *repr;
	int i;

	reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (!netdev)
			continue;

		repr = netdev_priv(netdev);
		if (repr->port->type != NFP_PORT_INVALID)
			continue;

		nfp_app_repr_preclean(app, netdev);
		rtnl_lock();
		rcu_assign_pointer(reprs->reprs[i], NULL);
		rtnl_unlock();
		synchronize_rcu();
		nfp_repr_clean(repr);
	}

	return 0;
}