cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

main.c (11008B)


      1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
      2/* Copyright (c) 2015 - 2021 Intel Corporation */
      3#include "main.h"
      4#include "../../../net/ethernet/intel/ice/ice.h"
      5
/* Keep the legacy i40iw module name resolvable so userspace that loads
 * the old driver name still gets this driver.
 */
MODULE_ALIAS("i40iw");
MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
MODULE_LICENSE("Dual BSD/GPL");

/* Notifier blocks for address/route/netdev events; the callbacks are
 * declared in main.h and let the RDMA driver track IPv4/IPv6 address
 * changes, neighbor updates and netdevice state transitions.
 */
static struct notifier_block irdma_inetaddr_notifier = {
	.notifier_call = irdma_inetaddr_event
};

static struct notifier_block irdma_inetaddr6_notifier = {
	.notifier_call = irdma_inet6addr_event
};

static struct notifier_block irdma_net_notifier = {
	.notifier_call = irdma_net_event
};

static struct notifier_block irdma_netdevice_notifier = {
	.notifier_call = irdma_netdevice_event
};
     26
/* Register all network-stack notifiers used by the driver. Called once
 * from module init, after both auxiliary drivers register successfully.
 */
static void irdma_register_notifiers(void)
{
	register_inetaddr_notifier(&irdma_inetaddr_notifier);
	register_inet6addr_notifier(&irdma_inetaddr6_notifier);
	register_netevent_notifier(&irdma_net_notifier);
	register_netdevice_notifier(&irdma_netdevice_notifier);
}
     34
/* Unregister the notifiers installed by irdma_register_notifiers().
 * Called first from module exit, before the auxiliary drivers unregister.
 * NOTE(review): order is not the exact reverse of registration; it appears
 * intentional (matches upstream) but confirm before reordering.
 */
static void irdma_unregister_notifiers(void)
{
	unregister_netevent_notifier(&irdma_net_notifier);
	unregister_inetaddr_notifier(&irdma_inetaddr_notifier);
	unregister_inet6addr_notifier(&irdma_inetaddr6_notifier);
	unregister_netdevice_notifier(&irdma_netdevice_notifier);
}
     42
/* Prepare the VSI for an imminent traffic-class reconfiguration:
 * flag the pending change, ask all QPs to suspend, wait (bounded by
 * IRDMA_EVENT_TIMEOUT) for the suspend count to drain, then reset the
 * work-scheduler tree so it can be rebuilt with the new TC layout.
 */
static void irdma_prep_tc_change(struct irdma_device *iwdev)
{
	iwdev->vsi.tc_change_pending = true;
	irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);

	/* Wait for all qp's to suspend */
	wait_event_timeout(iwdev->suspend_wq,
			   !atomic_read(&iwdev->vsi.qp_suspend_reqs),
			   IRDMA_EVENT_TIMEOUT);
	irdma_ws_reset(&iwdev->vsi);
}
     54
     55static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
     56{
     57	if (mtu < IRDMA_MIN_MTU_IPV4)
     58		ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
     59	else if (mtu < IRDMA_MIN_MTU_IPV6)
     60		ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
     61}
     62
     63static void irdma_fill_qos_info(struct irdma_l2params *l2params,
     64				struct iidc_qos_params *qos_info)
     65{
     66	int i;
     67
     68	l2params->num_tc = qos_info->num_tc;
     69	l2params->vsi_prio_type = qos_info->vport_priority_type;
     70	l2params->vsi_rel_bw = qos_info->vport_relative_bw;
     71	for (i = 0; i < l2params->num_tc; i++) {
     72		l2params->tc_info[i].egress_virt_up =
     73			qos_info->tc_info[i].egress_virt_up;
     74		l2params->tc_info[i].ingress_virt_up =
     75			qos_info->tc_info[i].ingress_virt_up;
     76		l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
     77		l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
     78		l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
     79	}
     80	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
     81		l2params->up2tc[i] = qos_info->up2tc[i];
     82	if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
     83		l2params->dscp_mode = true;
     84		memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
     85	}
     86}
     87
/**
 * irdma_iidc_event_handler - dispatch IIDC events from the ice LAN driver
 * @pf: LAN PF that owns this auxiliary RDMA device
 * @event: event descriptor; *event->type is a bitmap of IIDC_EVENT_* bits
 *
 * Handles MTU changes, before/after traffic-class reconfiguration, and
 * critical hardware error notifications (OICR). On unrecoverable PE/HMC/
 * push errors it sets rf->reset and requests a PF reset from the LAN side.
 */
static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
{
	struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
	struct irdma_l2params l2params = {};

	if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
		ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
		/* Only push the new MTU down if it actually changed. */
		if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
			l2params.mtu = iwdev->netdev->mtu;
			l2params.mtu_changed = true;
			irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
			irdma_change_l2params(&iwdev->vsi, &l2params);
		}
	} else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
		/* Ignore duplicate "before" notifications while one is pending. */
		if (iwdev->vsi.tc_change_pending)
			return;

		irdma_prep_tc_change(iwdev);
	} else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
		struct iidc_qos_params qos_info = {};

		/* An "after" event without a matching "before" is spurious. */
		if (!iwdev->vsi.tc_change_pending)
			return;

		l2params.tc_changed = true;
		ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
		ice_get_qos_params(pf, &qos_info);
		irdma_fill_qos_info(&l2params, &qos_info);
		/* DCB VLAN tagging is only relevant for RoCE with >1 TC and
		 * when DSCP is not carrying the priority instead.
		 */
		if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
			iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
		irdma_change_l2params(&iwdev->vsi, &l2params);
	} else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
		ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
			   event->reg);
		if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
			u32 pe_criterr;

			pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
/* Q1 resource exhaustion is reported via CRITERR but is not fatal. */
#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
			if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
				ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
					  pe_criterr);
				iwdev->rf->reset = true;
			} else {
				ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
			}
		}
		if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
			ibdev_err(&iwdev->ibdev, "HMC Error\n");
			iwdev->rf->reset = true;
		}
		if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
			ibdev_err(&iwdev->ibdev, "PE Push Error\n");
			iwdev->rf->reset = true;
		}
		/* Any fatal condition above triggers a single reset request. */
		if (iwdev->rf->reset)
			iwdev->rf->gen_ops.request_reset(iwdev->rf);
	}
}
    147
/**
 * irdma_request_reset - Request a reset
 * @rf: RDMA PCI function
 *
 * Asks the owning ice PF to perform a PF-level reset (IIDC_PFR) on our
 * behalf; installed as rf->gen_ops.request_reset.
 */
static void irdma_request_reset(struct irdma_pci_f *rf)
{
	struct ice_pf *pf = rf->cdev;

	ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
	ice_rdma_request_reset(pf, IIDC_PFR);
}
    159
    160/**
    161 * irdma_lan_register_qset - Register qset with LAN driver
    162 * @vsi: vsi structure
    163 * @tc_node: Traffic class node
    164 */
    165static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
    166				   struct irdma_ws_node *tc_node)
    167{
    168	struct irdma_device *iwdev = vsi->back_vsi;
    169	struct ice_pf *pf = iwdev->rf->cdev;
    170	struct iidc_rdma_qset_params qset = {};
    171	int ret;
    172
    173	qset.qs_handle = tc_node->qs_handle;
    174	qset.tc = tc_node->traffic_class;
    175	qset.vport_id = vsi->vsi_idx;
    176	ret = ice_add_rdma_qset(pf, &qset);
    177	if (ret) {
    178		ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
    179		return ret;
    180	}
    181
    182	tc_node->l2_sched_node_id = qset.teid;
    183	vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
    184
    185	return 0;
    186}
    187
    188/**
    189 * irdma_lan_unregister_qset - Unregister qset with LAN driver
    190 * @vsi: vsi structure
    191 * @tc_node: Traffic class node
    192 */
    193static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
    194				      struct irdma_ws_node *tc_node)
    195{
    196	struct irdma_device *iwdev = vsi->back_vsi;
    197	struct ice_pf *pf = iwdev->rf->cdev;
    198	struct iidc_rdma_qset_params qset = {};
    199
    200	qset.qs_handle = tc_node->qs_handle;
    201	qset.tc = tc_node->traffic_class;
    202	qset.vport_id = vsi->vsi_idx;
    203	qset.teid = tc_node->l2_sched_node_id;
    204
    205	if (ice_del_rdma_qset(pf, &qset))
    206		ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
    207}
    208
/* Auxiliary-device remove callback: unregister the IB device and drop
 * the RDMA VSI filter on the LAN side. Counterpart of irdma_probe().
 */
static void irdma_remove(struct auxiliary_device *aux_dev)
{
	struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
							    struct iidc_auxiliary_dev,
							    adev);
	struct ice_pf *pf = iidc_adev->pf;
	struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);

	irdma_ib_unregister_device(iwdev);
	ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);

	pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
    222
    223static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
    224				   struct ice_vsi *vsi)
    225{
    226	struct irdma_pci_f *rf = iwdev->rf;
    227
    228	rf->cdev = pf;
    229	rf->gen_ops.register_qset = irdma_lan_register_qset;
    230	rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
    231	rf->hw.hw_addr = pf->hw.hw_addr;
    232	rf->pcidev = pf->pdev;
    233	rf->msix_count =  pf->num_rdma_msix;
    234	rf->pf_id = pf->hw.pf_id;
    235	rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
    236	rf->default_vsi.vsi_idx = vsi->vsi_num;
    237	rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
    238			    IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
    239	rf->rdma_ver = IRDMA_GEN_2;
    240	rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
    241	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
    242	rf->gen_ops.request_reset = irdma_request_reset;
    243	rf->limits_sel = 7;
    244	rf->iwdev = iwdev;
    245	mutex_init(&iwdev->ah_tbl_lock);
    246	iwdev->netdev = vsi->netdev;
    247	iwdev->vsi_num = vsi->vsi_num;
    248	iwdev->init_state = INITIAL_STATE;
    249	iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
    250	iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
    251	iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
    252	iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
    253	if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
    254		iwdev->roce_mode = true;
    255}
    256
/* Auxiliary-device probe: allocate the IB device and PCI-function state,
 * bring up control and runtime HW (with QoS/MTU from the LAN driver),
 * register with the RDMA core, and enable the RDMA VSI filter.
 * Unwinds in reverse order via the goto ladder on any failure.
 */
static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
	struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
							    struct iidc_auxiliary_dev,
							    adev);
	struct ice_pf *pf = iidc_adev->pf;
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct iidc_qos_params qos_info = {};
	struct irdma_device *iwdev;
	struct irdma_pci_f *rf;
	struct irdma_l2params l2params = {};
	int err;

	if (!vsi)
		return -EIO;
	iwdev = ib_alloc_device(irdma_device, ibdev);
	if (!iwdev)
		return -ENOMEM;
	iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
	if (!iwdev->rf) {
		ib_dealloc_device(&iwdev->ibdev);
		return -ENOMEM;
	}

	irdma_fill_device_info(iwdev, pf, vsi);
	rf = iwdev->rf;

	err = irdma_ctrl_init_hw(rf);
	if (err)
		goto err_ctrl_init;

	/* Seed L2 parameters (MTU + QoS) from the LAN driver before RT init. */
	l2params.mtu = iwdev->netdev->mtu;
	ice_get_qos_params(pf, &qos_info);
	irdma_fill_qos_info(&l2params, &qos_info);
	if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
		iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;

	err = irdma_rt_init_hw(iwdev, &l2params);
	if (err)
		goto err_rt_init;

	err = irdma_ib_register_device(iwdev);
	if (err)
		goto err_ibreg;

	ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);

	ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
	auxiliary_set_drvdata(aux_dev, iwdev);

	return 0;

err_ibreg:
	irdma_rt_deinit_hw(iwdev);
err_rt_init:
	irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
	kfree(iwdev->rf);
	ib_dealloc_device(&iwdev->ibdev);

	return err;
}
    319
/* Match both auxiliary device names the ice driver may create, depending
 * on whether the PF is configured for iWARP or RoCEv2.
 */
static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
	{.name = "ice.iwarp", },
	{.name = "ice.roce", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);

/* IIDC auxiliary driver: standard auxiliary probe/remove plus the IIDC
 * event callback the ice driver invokes for MTU/TC/error notifications.
 */
static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
	.adrv = {
	    .id_table = irdma_auxiliary_id_table,
	    .probe = irdma_probe,
	    .remove = irdma_remove,
	},
	.event_handler = irdma_iidc_event_handler,
};
    336
/* Module init: register the gen-1 (i40iw) auxiliary driver first, then
 * the gen-2 (irdma) one, and finally the network notifiers. Any failure
 * unwinds whatever was already registered.
 */
static int __init irdma_init_module(void)
{
	int ret;

	ret = auxiliary_driver_register(&i40iw_auxiliary_drv);
	if (ret) {
		pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n",
		       ret);
		return ret;
	}

	ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
	if (ret) {
		auxiliary_driver_unregister(&i40iw_auxiliary_drv);
		pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
		       ret);
		return ret;
	}

	irdma_register_notifiers();

	return 0;
}
    360
/* Module exit: tear down in reverse of irdma_init_module() — notifiers
 * first, then the gen-2 and gen-1 auxiliary drivers.
 */
static void __exit irdma_exit_module(void)
{
	irdma_unregister_notifiers();
	auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
	auxiliary_driver_unregister(&i40iw_auxiliary_drv);
}

module_init(irdma_init_module);
module_exit(irdma_exit_module);