cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

t7xx_netdev.c (10175B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>

#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define IP_MUX_SESSION_DEFAULT	0

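/* Bring up the carrier and start all TX queues when the interface is opened. */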
static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	atomic_inc(&ccmni->usage);
	return 0;
}

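/* Stop TX and drop the carrier when the interface is closed. */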
static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	atomic_dec(&ccmni->usage);
	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}

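/*
 * Tag the skb with its CCMNI interface index and hand it to the DPMAIF TX
 * layer; a non-zero return from the HIF is reported as NETDEV_TX_BUSY.
 */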
static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}

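/*
 * Oversized packets and packets without headroom for the CCCI header are
 * dropped; DPMAIF backpressure is propagated as NETDEV_TX_BUSY so the core
 * requeues the skb.
 */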
static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* Drop packets that exceed the MTU or lack headroom for the CCCI header */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}

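/* Watchdog timeout: count the error and, if the device is in use, restart TX. */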
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	/* The WWAN core prepends its own private area to the netdev priv,
	 * so the driver data must be fetched with wwan_netdev_drvpriv(),
	 * as in the other handlers, not with a bare netdev_priv().
	 */
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open	  = t7xx_ccmni_open,
	.ndo_stop	  = t7xx_ccmni_close,
	.ndo_start_xmit   = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout   = t7xx_ccmni_tx_timeout,
};

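/* Restore carrier and TX queues on every interface still held open. */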
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}
}

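/* Quiesce TX on open interfaces before the DPMAIF hardware is stopped. */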
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}

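/* Drop the carrier on open interfaces once the DPMAIF hardware has stopped. */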
static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}

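/*
 * Set up a CCMNI netdev as a raw-IP, point-to-point link: reserve headroom
 * for the CCCI header, disable ARP and L2 framing, and advertise the SG and
 * checksum offloads provided by DPMAIF.
 */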
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->hard_header_len += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}

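/* WWAN core hook: bind a new link to the CCMNI slot if_id and register it. */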
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	ctlb->ccmni_inst[if_id] = ccmni;

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}

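/* WWAN core hook: validate the slot binding, then unregister the netdev. */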
static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	unregister_netdevice(dev);
}

static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup     = t7xx_ccmni_wwan_setup,
	.newlink   = t7xx_ccmni_wwan_newlink,
	.dellink   = t7xx_ccmni_wwan_dellink,
};

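/* Idempotent: registers the WWAN link ops only once per device. */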
static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}

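/*
 * Modem state notifier: start the network interfaces when the modem is
 * READY, quiesce them around EXCEPTION/STOPPED, and forward handshake and
 * stop transitions to the DPMAIF layer.
 */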
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		break;

	default:
		break;
	}

	return ret;
}

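/* Register the CCMNI control block for modem state machine notifications. */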
static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}

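/*
 * DPMAIF RX callback: deliver the skb to the interface recorded in its
 * control block, deriving the L3 protocol from the RX packet type.
 */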
static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	skb->dev = net_dev;

	pkt_type = skb_cb->rx_pkt_type;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	netif_rx(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}

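/* TX-done notification for the default interface: wake the queue if it was
 * stopped due to a full ring.
 */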
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}

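/* TX ring full: stop the corresponding netdev queue to apply backpressure. */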
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}

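/* Translate DPMAIF TX queue state events into netdev queue wake/stop. */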
static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!ctlb->ccmni_inst[0]) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}

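/* Allocate the CCMNI control block, wire up the DPMAIF callbacks and bring
 * up the HIF layer.
 */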
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	init_md_status_notifier(t7xx_dev);
	return 0;
}

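/* Unhook the state notifier, unregister the WWAN ops and shut down DPMAIF. */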
void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}