cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ehea_main.c (85391B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level (netif message level bitmask)");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1: yes, 0: no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{
		.type = "network",
		.compatible = "IBM,lhea-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

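/*
 * Rebuild the flat snapshot of {adapter handle, firmware handle} pairs
 * for every EQ, CQ, QP and MR currently in use; keeping this array up
 * to date lets teardown code free firmware resources without walking
 * the live adapter and port lists.
 */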
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

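/*
 * Rebuild the array of broadcast/multicast (BCMC) registrations for
 * all active ports; each MAC address is tracked twice, once untagged
 * and once for all VLAN IDs.
 */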
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static void ehea_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes   += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes   += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

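/*
 * Replenish receive queue 1 with freshly allocated skbs and ring the
 * doorbell; under memory pressure the shortfall is remembered in
 * os_skbs and made up on a later refill.
 */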
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_consume_skb_any(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/*
	 * The packet was not an IPv4 packet, so a complemented checksum was
	 * calculated. The value is found in the Internet Checksum field.
	 */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						  "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

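/*
 * check_sqs() posts a marker WQE (wr_id == SWQE_RESTART_CHECK) on each
 * send queue; ehea_proc_cqes() sets sq_restart_flag when the marker
 * completes, and a timeout while waiting for the handshake means the
 * hardware and software queues are out of sync, forcing a port reset.
 */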
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}


static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

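/*
 * NAPI poll function: drain send-CQ completions and receive queues,
 * then re-arm the completion event channels and re-check for work that
 * raced in before completing, as the NAPI contract requires.
 */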
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				 reset_port = 1;
		} else
			reset_port = 1;   /* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

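/*
 * Decode one event queue entry (port state change, adapter or port
 * malfunction) and update the port's carrier and link state to match.
 */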
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		netdev_err(NULL, "unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(struct tasklet_struct *t)
{
	struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;


	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);


	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

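/*
 * Copy the packet headers (or the entire linear data, when it fits)
 * into the SWQE2 immediate area; any remaining linear data is
 * described by the first scatter-gather entry.
 */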
   1603static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
   1604				  u32 lkey)
   1605{
   1606	int skb_data_size = skb_headlen(skb);
   1607	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
   1608	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
   1609	unsigned int immediate_len = SWQE2_MAX_IMM;
   1610
   1611	swqe->descriptors = 0;
   1612
   1613	if (skb_is_gso(skb)) {
   1614		swqe->tx_control |= EHEA_SWQE_TSO;
   1615		swqe->mss = skb_shinfo(skb)->gso_size;
   1616		/*
   1617		 * For TSO packets we only copy the headers into the
   1618		 * immediate area.
   1619		 */
   1620		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
   1621	}
   1622
   1623	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
   1624		skb_copy_from_linear_data(skb, imm_data, immediate_len);
   1625		swqe->immediate_data_length = immediate_len;
   1626
   1627		if (skb_data_size > immediate_len) {
   1628			sg1entry->l_key = lkey;
   1629			sg1entry->len = skb_data_size - immediate_len;
   1630			sg1entry->vaddr =
   1631				ehea_map_vaddr(skb->data + immediate_len);
   1632			swqe->descriptors++;
   1633		}
   1634	} else {
   1635		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
   1636		swqe->immediate_data_length = skb_data_size;
   1637	}
   1638}
   1639
   1640static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
   1641				    struct ehea_swqe *swqe, u32 lkey)
   1642{
   1643	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
   1644	skb_frag_t *frag;
   1645	int nfrags, sg1entry_contains_frag_data, i;
   1646
   1647	nfrags = skb_shinfo(skb)->nr_frags;
   1648	sg1entry = &swqe->u.immdata_desc.sg_entry;
   1649	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
   1650	sg1entry_contains_frag_data = 0;
   1651
   1652	write_swqe2_immediate(skb, swqe, lkey);
   1653
   1654	/* write descriptors */
   1655	if (nfrags > 0) {
   1656		if (swqe->descriptors == 0) {
   1657			/* sg1entry not yet used */
   1658			frag = &skb_shinfo(skb)->frags[0];
   1659
   1660			/* copy sg1entry data */
   1661			sg1entry->l_key = lkey;
   1662			sg1entry->len = skb_frag_size(frag);
   1663			sg1entry->vaddr =
   1664				ehea_map_vaddr(skb_frag_address(frag));
   1665			swqe->descriptors++;
   1666			sg1entry_contains_frag_data = 1;
   1667		}
   1668
   1669		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
   1670
   1671			frag = &skb_shinfo(skb)->frags[i];
   1672			sgentry = &sg_list[i - sg1entry_contains_frag_data];
   1673
   1674			sgentry->l_key = lkey;
   1675			sgentry->len = skb_frag_size(frag);
   1676			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
   1677			swqe->descriptors++;
   1678		}
   1679	}
   1680}
   1681
   1682static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
   1683{
   1684	int ret = 0;
   1685	u64 hret;
   1686	u8 reg_type;
   1687
   1688	/* De/Register untagged packets */
   1689	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
   1690	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
   1691				     port->logical_port_id,
   1692				     reg_type, port->mac_addr, 0, hcallid);
   1693	if (hret != H_SUCCESS) {
   1694		pr_err("%sregistering bc address failed (tagged)\n",
   1695		       hcallid == H_REG_BCMC ? "" : "de");
   1696		ret = -EIO;
   1697		goto out_herr;
   1698	}
   1699
   1700	/* De/Register VLAN packets */
   1701	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
   1702	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
   1703				     port->logical_port_id,
   1704				     reg_type, port->mac_addr, 0, hcallid);
   1705	if (hret != H_SUCCESS) {
   1706		pr_err("%sregistering bc address failed (vlan)\n",
   1707		       hcallid == H_REG_BCMC ? "" : "de");
   1708		ret = -EIO;
   1709	}
   1710out_herr:
   1711	return ret;
   1712}
   1713
   1714static int ehea_set_mac_addr(struct net_device *dev, void *sa)
   1715{
   1716	struct ehea_port *port = netdev_priv(dev);
   1717	struct sockaddr *mac_addr = sa;
   1718	struct hcp_ehea_port_cb0 *cb0;
   1719	int ret;
   1720	u64 hret;
   1721
   1722	if (!is_valid_ether_addr(mac_addr->sa_data)) {
   1723		ret = -EADDRNOTAVAIL;
   1724		goto out;
   1725	}
   1726
   1727	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
   1728	if (!cb0) {
   1729		pr_err("no mem for cb0\n");
   1730		ret = -ENOMEM;
   1731		goto out;
   1732	}
   1733
   1734	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
   1735
   1736	cb0->port_mac_addr = cb0->port_mac_addr >> 16;
   1737
   1738	hret = ehea_h_modify_ehea_port(port->adapter->handle,
   1739				       port->logical_port_id, H_PORT_CB0,
   1740				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
   1741	if (hret != H_SUCCESS) {
   1742		ret = -EIO;
   1743		goto out_free;
   1744	}
   1745
   1746	eth_hw_addr_set(dev, mac_addr->sa_data);
   1747
   1748	/* Deregister old MAC in pHYP */
   1749	if (port->state == EHEA_PORT_UP) {
   1750		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
   1751		if (ret)
   1752			goto out_upregs;
   1753	}
   1754
   1755	port->mac_addr = cb0->port_mac_addr << 16;
   1756
   1757	/* Register new MAC in pHYP */
   1758	if (port->state == EHEA_PORT_UP) {
   1759		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
   1760		if (ret)
   1761			goto out_upregs;
   1762	}
   1763
   1764	ret = 0;
   1765
   1766out_upregs:
   1767	ehea_update_bcmc_registrations();
   1768out_free:
   1769	free_page((unsigned long)cb0);
   1770out:
   1771	return ret;
   1772}
   1773
   1774static void ehea_promiscuous_error(u64 hret, int enable)
   1775{
   1776	if (hret == H_AUTHORITY)
   1777		pr_info("Hypervisor denied %sabling promiscuous mode\n",
   1778			enable == 1 ? "en" : "dis");
   1779	else
   1780		pr_err("failed %sabling promiscuous mode\n",
   1781		       enable == 1 ? "en" : "dis");
   1782}
   1783
   1784static void ehea_promiscuous(struct net_device *dev, int enable)
   1785{
   1786	struct ehea_port *port = netdev_priv(dev);
   1787	struct hcp_ehea_port_cb7 *cb7;
   1788	u64 hret;
   1789
   1790	if (enable == port->promisc)
   1791		return;
   1792
   1793	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
   1794	if (!cb7) {
   1795		pr_err("no mem for cb7\n");
   1796		goto out;
   1797	}
   1798
   1799	/* Modify Pxs_DUCQPN in CB7 */
   1800	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
   1801
   1802	hret = ehea_h_modify_ehea_port(port->adapter->handle,
   1803				       port->logical_port_id,
   1804				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
   1805	if (hret) {
   1806		ehea_promiscuous_error(hret, enable);
   1807		goto out;
   1808	}
   1809
   1810	port->promisc = enable;
   1811out:
   1812	free_page((unsigned long)cb7);
   1813}
   1814
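/*
 * (De)register a multicast MAC, once untagged and once for all VLAN
 * IDs. A zero mc_mac_addr selects EHEA_BCMC_SCOPE_ALL, i.e. all
 * multicast addresses, which is how ALLMULTI is implemented below.
 */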
   1815static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
   1816				     u32 hcallid)
   1817{
   1818	u64 hret;
   1819	u8 reg_type;
   1820
   1821	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
   1822	if (mc_mac_addr == 0)
   1823		reg_type |= EHEA_BCMC_SCOPE_ALL;
   1824
   1825	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
   1826				     port->logical_port_id,
   1827				     reg_type, mc_mac_addr, 0, hcallid);
   1828	if (hret)
   1829		goto out;
   1830
   1831	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
   1832	if (mc_mac_addr == 0)
   1833		reg_type |= EHEA_BCMC_SCOPE_ALL;
   1834
   1835	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
   1836				     port->logical_port_id,
   1837				     reg_type, mc_mac_addr, 0, hcallid);
   1838out:
   1839	return hret;
   1840}
   1841
   1842static int ehea_drop_multicast_list(struct net_device *dev)
   1843{
   1844	struct ehea_port *port = netdev_priv(dev);
   1845	struct ehea_mc_list *mc_entry = port->mc_list;
   1846	struct list_head *pos;
   1847	struct list_head *temp;
   1848	int ret = 0;
   1849	u64 hret;
   1850
   1851	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
   1852		mc_entry = list_entry(pos, struct ehea_mc_list, list);
   1853
   1854		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
   1855						 H_DEREG_BCMC);
   1856		if (hret) {
   1857			pr_err("failed deregistering mcast MAC\n");
   1858			ret = -EIO;
   1859		}
   1860
   1861		list_del(pos);
   1862		kfree(mc_entry);
   1863	}
   1864	return ret;
   1865}
   1866
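/*
 * Enable or disable IFF_ALLMULTI by (de)registering the catch-all
 * multicast entry (MAC 0 + SCOPE_ALL). Enabling drops the
 * individually registered multicast list first.
 */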
   1867static void ehea_allmulti(struct net_device *dev, int enable)
   1868{
   1869	struct ehea_port *port = netdev_priv(dev);
   1870	u64 hret;
   1871
   1872	if (!port->allmulti) {
   1873		if (enable) {
   1874			/* Enable ALLMULTI */
   1875			ehea_drop_multicast_list(dev);
   1876			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
   1877			if (!hret)
   1878				port->allmulti = 1;
   1879			else
   1880				netdev_err(dev,
   1881					   "failed enabling IFF_ALLMULTI\n");
   1882		}
   1883	} else {
   1884		if (!enable) {
   1885			/* Disable ALLMULTI */
   1886			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
   1887			if (!hret)
   1888				port->allmulti = 0;
   1889			else
   1890				netdev_err(dev,
   1891					   "failed disabling IFF_ALLMULTI\n");
   1892		}
   1893	}
   1894}
   1895
   1896static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
   1897{
   1898	struct ehea_mc_list *ehea_mcl_entry;
   1899	u64 hret;
   1900
   1901	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
   1902	if (!ehea_mcl_entry)
   1903		return;
   1904
   1905	INIT_LIST_HEAD(&ehea_mcl_entry->list);
   1906
   1907	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
   1908
   1909	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
   1910					 H_REG_BCMC);
    1911	if (!hret) {
    1912		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
    1913	} else {
    1914		pr_err("failed registering mcast MAC\n");
    1915		kfree(ehea_mcl_entry);
    1916	}
   1917}
   1918
   1919static void ehea_set_multicast_list(struct net_device *dev)
   1920{
   1921	struct ehea_port *port = netdev_priv(dev);
   1922	struct netdev_hw_addr *ha;
   1923	int ret;
   1924
   1925	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
   1926
   1927	if (dev->flags & IFF_ALLMULTI) {
   1928		ehea_allmulti(dev, 1);
   1929		goto out;
   1930	}
   1931	ehea_allmulti(dev, 0);
   1932
   1933	if (!netdev_mc_empty(dev)) {
   1934		ret = ehea_drop_multicast_list(dev);
   1935		if (ret) {
   1936			/* Dropping the current multicast list failed.
   1937			 * Enabling ALL_MULTI is the best we can do.
   1938			 */
   1939			ehea_allmulti(dev, 1);
   1940		}
   1941
   1942		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
   1943			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
   1944				port->adapter->max_mc_mac);
   1945			goto out;
   1946		}
   1947
   1948		netdev_for_each_mc_addr(ha, dev)
   1949			ehea_add_multicast_entry(port, ha->addr);
   1950
   1951	}
   1952out:
   1953	ehea_update_bcmc_registrations();
   1954}
   1955
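/*
 * Fill the SWQE fields common to both transmit paths: the immediate
 * data and CRC flags, and, for IPv4 frames with CHECKSUM_PARTIAL, the
 * checksum offload flags plus the header offsets the hardware needs.
 * EHEA_SWQE_TCP_CHECKSUM doubles for UDP; tcp_offset points at the
 * respective checksum field.
 */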
   1956static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
   1957{
   1958	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
   1959
   1960	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
   1961		return;
   1962
   1963	if (skb->ip_summed == CHECKSUM_PARTIAL)
   1964		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
   1965
   1966	swqe->ip_start = skb_network_offset(skb);
   1967	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
   1968
   1969	switch (ip_hdr(skb)->protocol) {
   1970	case IPPROTO_UDP:
   1971		if (skb->ip_summed == CHECKSUM_PARTIAL)
   1972			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
   1973
   1974		swqe->tcp_offset = swqe->ip_end + 1 +
   1975				   offsetof(struct udphdr, check);
   1976		break;
   1977
   1978	case IPPROTO_TCP:
   1979		if (skb->ip_summed == CHECKSUM_PARTIAL)
   1980			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
   1981
   1982		swqe->tcp_offset = swqe->ip_end + 1 +
   1983				   offsetof(struct tcphdr, check);
   1984		break;
   1985	}
   1986}
   1987
   1988static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
   1989		       struct ehea_swqe *swqe, u32 lkey)
   1990{
   1991	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
   1992
   1993	xmit_common(skb, swqe);
   1994
   1995	write_swqe2_data(skb, dev, swqe, lkey);
   1996}
   1997
   1998static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
   1999		       struct ehea_swqe *swqe)
   2000{
   2001	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
   2002
   2003	xmit_common(skb, swqe);
   2004
   2005	if (!skb->data_len)
   2006		skb_copy_from_linear_data(skb, imm_data, skb->len);
   2007	else
   2008		skb_copy_bits(skb, 0, imm_data, skb->len);
   2009
   2010	swqe->immediate_data_length = skb->len;
   2011	dev_consume_skb_any(skb);
   2012}
   2013
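/*
 * Transmit path. Frames up to SWQE3_MAX_IMM bytes are copied into the
 * SWQE as immediate data (type 3), and only every sig_comp_iv-th such
 * SWQE requests a signalled completion. Larger frames go out by
 * descriptor (type 2), always signalled, with the skb parked in
 * sq_skba until the completion arrives. The queue is stopped once
 * fewer than two SWQEs remain.
 */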
   2014static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
   2015{
   2016	struct ehea_port *port = netdev_priv(dev);
   2017	struct ehea_swqe *swqe;
   2018	u32 lkey;
   2019	int swqe_index;
   2020	struct ehea_port_res *pr;
   2021	struct netdev_queue *txq;
   2022
   2023	pr = &port->port_res[skb_get_queue_mapping(skb)];
   2024	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
   2025
   2026	swqe = ehea_get_swqe(pr->qp, &swqe_index);
   2027	memset(swqe, 0, SWQE_HEADER_SIZE);
   2028	atomic_dec(&pr->swqe_avail);
   2029
   2030	if (skb_vlan_tag_present(skb)) {
   2031		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
   2032		swqe->vlan_tag = skb_vlan_tag_get(skb);
   2033	}
   2034
   2035	pr->tx_packets++;
   2036	pr->tx_bytes += skb->len;
   2037
   2038	if (skb->len <= SWQE3_MAX_IMM) {
   2039		u32 sig_iv = port->sig_comp_iv;
   2040		u32 swqe_num = pr->swqe_id_counter;
   2041		ehea_xmit3(skb, dev, swqe);
   2042		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
   2043			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
   2044		if (pr->swqe_ll_count >= (sig_iv - 1)) {
   2045			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
   2046						      sig_iv);
   2047			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
   2048			pr->swqe_ll_count = 0;
   2049		} else
   2050			pr->swqe_ll_count += 1;
   2051	} else {
   2052		swqe->wr_id =
   2053			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
   2054		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
   2055		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
   2056		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
   2057		pr->sq_skba.arr[pr->sq_skba.index] = skb;
   2058
   2059		pr->sq_skba.index++;
   2060		pr->sq_skba.index &= (pr->sq_skba.len - 1);
   2061
   2062		lkey = pr->send_mr.lkey;
   2063		ehea_xmit2(skb, dev, swqe, lkey);
   2064		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
   2065	}
   2066	pr->swqe_id_counter += 1;
   2067
   2068	netif_info(port, tx_queued, dev,
   2069		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
   2070	if (netif_msg_tx_queued(port))
   2071		ehea_dump(swqe, 512, "swqe");
   2072
   2073	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
   2074		netif_tx_stop_queue(txq);
   2075		swqe->tx_control |= EHEA_SWQE_PURGE;
   2076	}
   2077
   2078	ehea_post_swqe(pr->qp, swqe);
   2079
   2080	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
   2081		pr->p_stats.queue_stopped++;
   2082		netif_tx_stop_queue(txq);
   2083	}
   2084
   2085	return NETDEV_TX_OK;
   2086}
   2087
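/*
 * Set the VID's bit in the port's VLAN filter, a bitmap of 64-bit
 * words filled MSB-first: e.g. VID 100 lands in vlan_filter[1] as
 * 0x8000000000000000 >> 36 == 0x0000000008000000. The whole CB1 is
 * queried, modified and written back.
 */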
   2088static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
   2089{
   2090	struct ehea_port *port = netdev_priv(dev);
   2091	struct ehea_adapter *adapter = port->adapter;
   2092	struct hcp_ehea_port_cb1 *cb1;
   2093	int index;
   2094	u64 hret;
   2095	int err = 0;
   2096
   2097	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
   2098	if (!cb1) {
   2099		pr_err("no mem for cb1\n");
   2100		err = -ENOMEM;
   2101		goto out;
   2102	}
   2103
   2104	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
   2105				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
   2106	if (hret != H_SUCCESS) {
   2107		pr_err("query_ehea_port failed\n");
   2108		err = -EINVAL;
   2109		goto out;
   2110	}
   2111
   2112	index = (vid / 64);
   2113	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
   2114
   2115	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
   2116				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
   2117	if (hret != H_SUCCESS) {
   2118		pr_err("modify_ehea_port failed\n");
   2119		err = -EINVAL;
   2120	}
   2121out:
   2122	free_page((unsigned long)cb1);
   2123	return err;
   2124}
   2125
   2126static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
   2127{
   2128	struct ehea_port *port = netdev_priv(dev);
   2129	struct ehea_adapter *adapter = port->adapter;
   2130	struct hcp_ehea_port_cb1 *cb1;
   2131	int index;
   2132	u64 hret;
   2133	int err = 0;
   2134
   2135	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
   2136	if (!cb1) {
   2137		pr_err("no mem for cb1\n");
   2138		err = -ENOMEM;
   2139		goto out;
   2140	}
   2141
   2142	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
   2143				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
   2144	if (hret != H_SUCCESS) {
   2145		pr_err("query_ehea_port failed\n");
   2146		err = -EINVAL;
   2147		goto out;
   2148	}
   2149
   2150	index = (vid / 64);
   2151	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
   2152
   2153	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
   2154				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
   2155	if (hret != H_SUCCESS) {
   2156		pr_err("modify_ehea_port failed\n");
   2157		err = -EINVAL;
   2158	}
   2159out:
   2160	free_page((unsigned long)cb1);
   2161	return err;
   2162}
   2163
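/*
 * Walk a QP through its activation sequence with query/modify pairs
 * on control block 0: INITIALIZED, then ENABLED | INITIALIZED, then
 * ENABLED | RDY2SND, re-reading the control block before each step.
 */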
   2164static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
   2165{
   2166	int ret = -EIO;
   2167	u64 hret;
   2168	u16 dummy16 = 0;
   2169	u64 dummy64 = 0;
   2170	struct hcp_modify_qp_cb0 *cb0;
   2171
   2172	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
   2173	if (!cb0) {
   2174		ret = -ENOMEM;
   2175		goto out;
   2176	}
   2177
   2178	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2179				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
   2180	if (hret != H_SUCCESS) {
   2181		pr_err("query_ehea_qp failed (1)\n");
   2182		goto out;
   2183	}
   2184
   2185	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
   2186	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2187				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
   2188				     &dummy64, &dummy64, &dummy16, &dummy16);
   2189	if (hret != H_SUCCESS) {
   2190		pr_err("modify_ehea_qp failed (1)\n");
   2191		goto out;
   2192	}
   2193
   2194	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2195				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
   2196	if (hret != H_SUCCESS) {
   2197		pr_err("query_ehea_qp failed (2)\n");
   2198		goto out;
   2199	}
   2200
   2201	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
   2202	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2203				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
   2204				     &dummy64, &dummy64, &dummy16, &dummy16);
   2205	if (hret != H_SUCCESS) {
   2206		pr_err("modify_ehea_qp failed (2)\n");
   2207		goto out;
   2208	}
   2209
   2210	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2211				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
   2212	if (hret != H_SUCCESS) {
   2213		pr_err("query_ehea_qp failed (3)\n");
   2214		goto out;
   2215	}
   2216
   2217	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
   2218	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2219				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
   2220				     &dummy64, &dummy64, &dummy16, &dummy16);
   2221	if (hret != H_SUCCESS) {
   2222		pr_err("modify_ehea_qp failed (3)\n");
   2223		goto out;
   2224	}
   2225
   2226	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2227				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
   2228	if (hret != H_SUCCESS) {
   2229		pr_err("query_ehea_qp failed (4)\n");
   2230		goto out;
   2231	}
   2232
   2233	ret = 0;
   2234out:
   2235	free_page((unsigned long)cb0);
   2236	return ret;
   2237}
   2238
   2239static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
   2240{
   2241	int ret, i;
    2242	struct port_res_cfg pr_cfg;
   2243	enum ehea_eq_type eq_type = EHEA_EQ;
   2244
   2245	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
   2246				   EHEA_MAX_ENTRIES_EQ, 1);
   2247	if (!port->qp_eq) {
   2248		ret = -EINVAL;
   2249		pr_err("ehea_create_eq failed (qp_eq)\n");
   2250		goto out_kill_eq;
   2251	}
   2252
   2253	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
   2254	pr_cfg.max_entries_scq = sq_entries * 2;
   2255	pr_cfg.max_entries_sq = sq_entries;
   2256	pr_cfg.max_entries_rq1 = rq1_entries;
   2257	pr_cfg.max_entries_rq2 = rq2_entries;
   2258	pr_cfg.max_entries_rq3 = rq3_entries;
   2259
   2267	for (i = 0; i < def_qps; i++) {
   2268		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
   2269		if (ret)
   2270			goto out_clean_pr;
   2271	}
   2278
   2279	return 0;
   2280
   2281out_clean_pr:
   2282	while (--i >= 0)
   2283		ehea_clean_portres(port, &port->port_res[i]);
   2284
   2285out_kill_eq:
   2286	ehea_destroy_eq(port->qp_eq);
   2287	return ret;
   2288}
   2289
   2290static int ehea_clean_all_portres(struct ehea_port *port)
   2291{
   2292	int ret = 0;
   2293	int i;
   2294
   2295	for (i = 0; i < port->num_def_qps; i++)
   2296		ret |= ehea_clean_portres(port, &port->port_res[i]);
   2297
   2298	ret |= ehea_destroy_eq(port->qp_eq);
   2299
   2300	return ret;
   2301}
   2302
   2303static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
   2304{
   2305	if (adapter->active_ports)
   2306		return;
   2307
   2308	ehea_rem_mr(&adapter->mr);
   2309}
   2310
   2311static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
   2312{
   2313	if (adapter->active_ports)
   2314		return 0;
   2315
   2316	return ehea_reg_kernel_mr(adapter, &adapter->mr);
   2317}
   2318
   2319static int ehea_up(struct net_device *dev)
   2320{
   2321	int ret, i;
   2322	struct ehea_port *port = netdev_priv(dev);
   2323
   2324	if (port->state == EHEA_PORT_UP)
   2325		return 0;
   2326
   2327	ret = ehea_port_res_setup(port, port->num_def_qps);
   2328	if (ret) {
   2329		netdev_err(dev, "port_res_failed\n");
   2330		goto out;
   2331	}
   2332
   2333	/* Set default QP for this port */
   2334	ret = ehea_configure_port(port);
   2335	if (ret) {
   2336		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
   2337		goto out_clean_pr;
   2338	}
   2339
   2340	ret = ehea_reg_interrupts(dev);
   2341	if (ret) {
   2342		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
   2343		goto out_clean_pr;
   2344	}
   2345
   2346	for (i = 0; i < port->num_def_qps; i++) {
   2347		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
   2348		if (ret) {
   2349			netdev_err(dev, "activate_qp failed\n");
   2350			goto out_free_irqs;
   2351		}
   2352	}
   2353
   2354	for (i = 0; i < port->num_def_qps; i++) {
   2355		ret = ehea_fill_port_res(&port->port_res[i]);
   2356		if (ret) {
   2357			netdev_err(dev, "out_free_irqs\n");
   2358			goto out_free_irqs;
   2359		}
   2360	}
   2361
   2362	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
   2363	if (ret) {
   2364		ret = -EIO;
   2365		goto out_free_irqs;
   2366	}
   2367
   2368	port->state = EHEA_PORT_UP;
   2369
   2370	ret = 0;
   2371	goto out;
   2372
   2373out_free_irqs:
   2374	ehea_free_interrupts(dev);
   2375
   2376out_clean_pr:
   2377	ehea_clean_all_portres(port);
   2378out:
   2379	if (ret)
   2380		netdev_info(dev, "Failed starting. ret=%i\n", ret);
   2381
   2382	ehea_update_bcmc_registrations();
   2383	ehea_update_firmware_handles();
   2384
   2385	return ret;
   2386}
   2387
   2388static void port_napi_disable(struct ehea_port *port)
   2389{
   2390	int i;
   2391
   2392	for (i = 0; i < port->num_def_qps; i++)
   2393		napi_disable(&port->port_res[i].napi);
   2394}
   2395
   2396static void port_napi_enable(struct ehea_port *port)
   2397{
   2398	int i;
   2399
   2400	for (i = 0; i < port->num_def_qps; i++)
   2401		napi_enable(&port->port_res[i].napi);
   2402}
   2403
   2404static int ehea_open(struct net_device *dev)
   2405{
   2406	int ret;
   2407	struct ehea_port *port = netdev_priv(dev);
   2408
   2409	mutex_lock(&port->port_lock);
   2410
   2411	netif_info(port, ifup, dev, "enabling port\n");
   2412
   2413	netif_carrier_off(dev);
   2414
   2415	ret = ehea_up(dev);
   2416	if (!ret) {
   2417		port_napi_enable(port);
   2418		netif_tx_start_all_queues(dev);
   2419	}
   2420
   2421	mutex_unlock(&port->port_lock);
   2422	schedule_delayed_work(&port->stats_work,
   2423			      round_jiffies_relative(msecs_to_jiffies(1000)));
   2424
   2425	return ret;
   2426}
   2427
   2428static int ehea_down(struct net_device *dev)
   2429{
   2430	int ret;
   2431	struct ehea_port *port = netdev_priv(dev);
   2432
   2433	if (port->state == EHEA_PORT_DOWN)
   2434		return 0;
   2435
   2436	ehea_drop_multicast_list(dev);
   2437	ehea_allmulti(dev, 0);
   2438	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
   2439
   2440	ehea_free_interrupts(dev);
   2441
   2442	port->state = EHEA_PORT_DOWN;
   2443
   2444	ehea_update_bcmc_registrations();
   2445
   2446	ret = ehea_clean_all_portres(port);
   2447	if (ret)
   2448		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
   2449
   2450	ehea_update_firmware_handles();
   2451
   2452	return ret;
   2453}
   2454
   2455static int ehea_stop(struct net_device *dev)
   2456{
   2457	int ret;
   2458	struct ehea_port *port = netdev_priv(dev);
   2459
   2460	netif_info(port, ifdown, dev, "disabling port\n");
   2461
   2462	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
   2463	cancel_work_sync(&port->reset_task);
   2464	cancel_delayed_work_sync(&port->stats_work);
   2465	mutex_lock(&port->port_lock);
   2466	netif_tx_stop_all_queues(dev);
   2467	port_napi_disable(port);
   2468	ret = ehea_down(dev);
   2469	mutex_unlock(&port->port_lock);
   2470	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
   2471	return ret;
   2472}
   2473
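/*
 * Flag every send WQE with EHEA_SWQE_PURGE so that pending work is
 * drained rather than transmitted while transfers are stopped
 * (memory DLPAR path).
 */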
   2474static void ehea_purge_sq(struct ehea_qp *orig_qp)
   2475{
   2476	struct ehea_qp qp = *orig_qp;
   2477	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
   2478	struct ehea_swqe *swqe;
   2479	int wqe_index;
   2480	int i;
   2481
   2482	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
   2483		swqe = ehea_get_swqe(&qp, &wqe_index);
   2484		swqe->tx_control |= EHEA_SWQE_PURGE;
   2485	}
   2486}
   2487
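/*
 * Wait up to 100 ms per port resource for the send queues to drain,
 * i.e. for swqe_avail to climb back to its expected level.
 */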
   2488static void ehea_flush_sq(struct ehea_port *port)
   2489{
   2490	int i;
   2491
   2492	for (i = 0; i < port->num_def_qps; i++) {
   2493		struct ehea_port_res *pr = &port->port_res[i];
   2494		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
   2495		int ret;
   2496
   2497		ret = wait_event_timeout(port->swqe_avail_wq,
   2498			 atomic_read(&pr->swqe_avail) >= swqe_max,
   2499			 msecs_to_jiffies(100));
   2500
   2501		if (!ret) {
   2502			pr_err("WARNING: sq not flushed completely\n");
   2503			break;
   2504		}
   2505	}
   2506}
   2507
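/*
 * Quiesce all QPs for a memory DLPAR operation: purge each send
 * queue, clear the QP's enabled bit via query/modify of control
 * block 0, and deregister the shared memory regions that reference
 * the old memory layout.
 */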
   2508static int ehea_stop_qps(struct net_device *dev)
   2509{
   2510	struct ehea_port *port = netdev_priv(dev);
   2511	struct ehea_adapter *adapter = port->adapter;
   2512	struct hcp_modify_qp_cb0 *cb0;
   2513	int ret = -EIO;
   2514	int dret;
   2515	int i;
   2516	u64 hret;
   2517	u64 dummy64 = 0;
   2518	u16 dummy16 = 0;
   2519
   2520	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
   2521	if (!cb0) {
   2522		ret = -ENOMEM;
   2523		goto out;
   2524	}
   2525
   2526	for (i = 0; i < (port->num_def_qps); i++) {
   2527		struct ehea_port_res *pr =  &port->port_res[i];
   2528		struct ehea_qp *qp = pr->qp;
   2529
   2530		/* Purge send queue */
   2531		ehea_purge_sq(qp);
   2532
   2533		/* Disable queue pair */
   2534		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2535					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
   2536					    cb0);
   2537		if (hret != H_SUCCESS) {
   2538			pr_err("query_ehea_qp failed (1)\n");
   2539			goto out;
   2540		}
   2541
   2542		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
   2543		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
   2544
   2545		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2546					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
   2547							    1), cb0, &dummy64,
   2548					     &dummy64, &dummy16, &dummy16);
   2549		if (hret != H_SUCCESS) {
   2550			pr_err("modify_ehea_qp failed (1)\n");
   2551			goto out;
   2552		}
   2553
   2554		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2555					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
   2556					    cb0);
   2557		if (hret != H_SUCCESS) {
   2558			pr_err("query_ehea_qp failed (2)\n");
   2559			goto out;
   2560		}
   2561
   2562		/* deregister shared memory regions */
   2563		dret = ehea_rem_smrs(pr);
   2564		if (dret) {
   2565			pr_err("unreg shared memory region failed\n");
   2566			goto out;
   2567		}
   2568	}
   2569
   2570	ret = 0;
   2571out:
   2572	free_page((unsigned long)cb0);
   2573
   2574	return ret;
   2575}
   2576
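/*
 * After the adapter MR has been re-registered, patch every RQ2/RQ3
 * WQE in place with the new lkey and re-translate the skb data
 * pointers, since the vaddr mapping may have changed with the new
 * memory layout.
 */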
   2577static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
   2578{
   2579	struct ehea_qp qp = *orig_qp;
   2580	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
   2581	struct ehea_rwqe *rwqe;
   2582	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
   2583	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
   2584	struct sk_buff *skb;
   2585	u32 lkey = pr->recv_mr.lkey;
   2588	int i;
   2589	int index;
   2590
   2591	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
   2592		rwqe = ehea_get_next_rwqe(&qp, 2);
   2593		rwqe->sg_list[0].l_key = lkey;
   2594		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
   2595		skb = skba_rq2[index];
   2596		if (skb)
   2597			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
   2598	}
   2599
   2600	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
   2601		rwqe = ehea_get_next_rwqe(&qp, 3);
   2602		rwqe->sg_list[0].l_key = lkey;
   2603		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
   2604		skb = skba_rq3[index];
   2605		if (skb)
   2606			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
   2607	}
   2608}
   2609
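/*
 * Counterpart to ehea_stop_qps(): recreate the shared memory regions,
 * fix up the receive WQEs, set the QP enabled bit again and refill
 * all three receive queues.
 */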
   2610static int ehea_restart_qps(struct net_device *dev)
   2611{
   2612	struct ehea_port *port = netdev_priv(dev);
   2613	struct ehea_adapter *adapter = port->adapter;
   2614	int ret = 0;
   2615	int i;
   2616
   2617	struct hcp_modify_qp_cb0 *cb0;
   2618	u64 hret;
   2619	u64 dummy64 = 0;
   2620	u16 dummy16 = 0;
   2621
   2622	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
   2623	if (!cb0)
   2624		return -ENOMEM;
   2625
   2626	for (i = 0; i < (port->num_def_qps); i++) {
   2627		struct ehea_port_res *pr =  &port->port_res[i];
   2628		struct ehea_qp *qp = pr->qp;
   2629
   2630		ret = ehea_gen_smrs(pr);
   2631		if (ret) {
   2632			netdev_err(dev, "creation of shared memory regions failed\n");
   2633			goto out;
   2634		}
   2635
   2636		ehea_update_rqs(qp, pr);
   2637
   2638		/* Enable queue pair */
   2639		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2640					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
   2641					    cb0);
   2642		if (hret != H_SUCCESS) {
   2643			netdev_err(dev, "query_ehea_qp failed (1)\n");
   2644			ret = -EFAULT;
   2645			goto out;
   2646		}
   2647
   2648		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
   2649		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
   2650
   2651		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2652					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
   2653							    1), cb0, &dummy64,
   2654					     &dummy64, &dummy16, &dummy16);
   2655		if (hret != H_SUCCESS) {
   2656			netdev_err(dev, "modify_ehea_qp failed (1)\n");
   2657			ret = -EFAULT;
   2658			goto out;
   2659		}
   2660
   2661		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
   2662					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
   2663					    cb0);
   2664		if (hret != H_SUCCESS) {
   2665			netdev_err(dev, "query_ehea_qp failed (2)\n");
   2666			ret = -EFAULT;
   2667			goto out;
   2668		}
   2669
   2670		/* refill entire queue */
   2671		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
   2672		ehea_refill_rq2(pr, 0);
   2673		ehea_refill_rq3(pr, 0);
   2674	}
   2675out:
   2676	free_page((unsigned long)cb0);
   2677
   2678	return ret;
   2679}
   2680
   2681static void ehea_reset_port(struct work_struct *work)
   2682{
   2683	int ret;
   2684	struct ehea_port *port =
   2685		container_of(work, struct ehea_port, reset_task);
   2686	struct net_device *dev = port->netdev;
   2687
   2688	mutex_lock(&dlpar_mem_lock);
   2689	port->resets++;
   2690	mutex_lock(&port->port_lock);
   2691	netif_tx_disable(dev);
   2692
   2693	port_napi_disable(port);
   2694
   2695	ehea_down(dev);
   2696
   2697	ret = ehea_up(dev);
   2698	if (ret)
   2699		goto out;
   2700
   2701	ehea_set_multicast_list(dev);
   2702
   2703	netif_info(port, timer, dev, "reset successful\n");
   2704
   2705	port_napi_enable(port);
   2706
   2707	netif_tx_wake_all_queues(dev);
   2708out:
   2709	mutex_unlock(&port->port_lock);
   2710	mutex_unlock(&dlpar_mem_lock);
   2711}
   2712
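/*
 * Memory DLPAR handler: stop traffic and QPs on every active port,
 * deregister each adapter's MR, register a new MR covering the
 * changed memory layout, then restart the QPs and wake the queues.
 */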
   2713static void ehea_rereg_mrs(void)
   2714{
   2715	int ret, i;
   2716	struct ehea_adapter *adapter;
   2717
   2718	pr_info("LPAR memory changed - re-initializing driver\n");
   2719
   2720	list_for_each_entry(adapter, &adapter_list, list)
   2721		if (adapter->active_ports) {
   2722			/* Shutdown all ports */
   2723			for (i = 0; i < EHEA_MAX_PORTS; i++) {
   2724				struct ehea_port *port = adapter->port[i];
   2725				struct net_device *dev;
   2726
   2727				if (!port)
   2728					continue;
   2729
   2730				dev = port->netdev;
   2731
   2732				if (dev->flags & IFF_UP) {
   2733					mutex_lock(&port->port_lock);
   2734					netif_tx_disable(dev);
   2735					ehea_flush_sq(port);
   2736					ret = ehea_stop_qps(dev);
   2737					if (ret) {
   2738						mutex_unlock(&port->port_lock);
   2739						goto out;
   2740					}
   2741					port_napi_disable(port);
   2742					mutex_unlock(&port->port_lock);
   2743				}
   2744				reset_sq_restart_flag(port);
   2745			}
   2746
   2747			/* Unregister old memory region */
   2748			ret = ehea_rem_mr(&adapter->mr);
   2749			if (ret) {
   2750				pr_err("unregister MR failed - driver inoperable!\n");
   2751				goto out;
   2752			}
   2753		}
   2754
   2755	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
   2756
   2757	list_for_each_entry(adapter, &adapter_list, list)
   2758		if (adapter->active_ports) {
   2759			/* Register new memory region */
   2760			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
   2761			if (ret) {
   2762				pr_err("register MR failed - driver inoperable!\n");
   2763				goto out;
   2764			}
   2765
   2766			/* Restart all ports */
   2767			for (i = 0; i < EHEA_MAX_PORTS; i++) {
   2768				struct ehea_port *port = adapter->port[i];
   2769
   2770				if (port) {
   2771					struct net_device *dev = port->netdev;
   2772
   2773					if (dev->flags & IFF_UP) {
   2774						mutex_lock(&port->port_lock);
   2775						ret = ehea_restart_qps(dev);
   2776						if (!ret) {
   2777							check_sqs(port);
   2778							port_napi_enable(port);
   2779							netif_tx_wake_all_queues(dev);
   2780						} else {
   2781							netdev_err(dev, "Unable to restart QPS\n");
   2782						}
   2783						mutex_unlock(&port->port_lock);
   2784					}
   2785				}
   2786			}
   2787		}
   2788	pr_info("re-initializing driver complete\n");
   2789out:
   2790	return;
   2791}
   2792
   2793static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
   2794{
   2795	struct ehea_port *port = netdev_priv(dev);
   2796
   2797	if (netif_carrier_ok(dev) &&
   2798	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
   2799		ehea_schedule_port_reset(port);
   2800}
   2801
   2802static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
   2803{
   2804	struct hcp_query_ehea *cb;
   2805	u64 hret;
   2806	int ret;
   2807
   2808	cb = (void *)get_zeroed_page(GFP_KERNEL);
   2809	if (!cb) {
   2810		ret = -ENOMEM;
   2811		goto out;
   2812	}
   2813
   2814	hret = ehea_h_query_ehea(adapter->handle, cb);
   2815
   2816	if (hret != H_SUCCESS) {
   2817		ret = -EIO;
   2818		goto out_herr;
   2819	}
   2820
   2821	adapter->max_mc_mac = cb->max_mc_mac - 1;
   2822	ret = 0;
   2823
   2824out_herr:
   2825	free_page((unsigned long)cb);
   2826out:
   2827	return ret;
   2828}
   2829
   2830static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
   2831{
   2832	struct hcp_ehea_port_cb4 *cb4;
   2833	u64 hret;
   2834	int ret = 0;
   2835
   2836	*jumbo = 0;
   2837
    2838	/* (Try to) enable jumbo frames */
    2839	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
    2840	if (!cb4) {
    2841		pr_err("no mem for cb4\n");
    2842		ret = -ENOMEM;
    2843		goto out;
    2844	}
    2845
    2846	hret = ehea_h_query_ehea_port(port->adapter->handle,
    2847				      port->logical_port_id,
    2848				      H_PORT_CB4, H_PORT_CB4_JUMBO, cb4);
    2849	if (hret == H_SUCCESS) {
    2850		if (cb4->jumbo_frame) {
    2851			*jumbo = 1;
    2852		} else {
    2853			/* jumbo frames are off: try to switch them on */
    2854			cb4->jumbo_frame = 1;
    2855			hret = ehea_h_modify_ehea_port(port->adapter->handle,
    2856						       port->logical_port_id,
    2857						       H_PORT_CB4,
    2858						       H_PORT_CB4_JUMBO, cb4);
    2859			if (hret == H_SUCCESS)
    2860				*jumbo = 1;
    2861		}
    2862	} else {
    2863		ret = -EINVAL;
    2864	}
    2865
    2866	free_page((unsigned long)cb4);
   2869out:
   2870	return ret;
   2871}
   2872
   2873static ssize_t log_port_id_show(struct device *dev,
   2874				struct device_attribute *attr, char *buf)
   2875{
   2876	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
   2877	return sprintf(buf, "%d", port->logical_port_id);
   2878}
   2879
   2880static DEVICE_ATTR_RO(log_port_id);
   2881
   2882static void logical_port_release(struct device *dev)
   2883{
   2884	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
   2885	of_node_put(port->ofdev.dev.of_node);
   2886}
   2887
   2888static struct device *ehea_register_port(struct ehea_port *port,
   2889					 struct device_node *dn)
   2890{
   2891	int ret;
   2892
   2893	port->ofdev.dev.of_node = of_node_get(dn);
   2894	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
   2895	port->ofdev.dev.bus = &ibmebus_bus_type;
   2896
   2897	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
   2898	port->ofdev.dev.release = logical_port_release;
   2899
   2900	ret = of_device_register(&port->ofdev);
   2901	if (ret) {
   2902		pr_err("failed to register device. ret=%d\n", ret);
   2903		goto out;
   2904	}
   2905
   2906	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
   2907	if (ret) {
   2908		pr_err("failed to register attributes, ret=%d\n", ret);
   2909		goto out_unreg_of_dev;
   2910	}
   2911
   2912	return &port->ofdev.dev;
   2913
   2914out_unreg_of_dev:
   2915	of_device_unregister(&port->ofdev);
   2916out:
   2917	return NULL;
   2918}
   2919
   2920static void ehea_unregister_port(struct ehea_port *port)
   2921{
   2922	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
   2923	of_device_unregister(&port->ofdev);
   2924}
   2925
   2926static const struct net_device_ops ehea_netdev_ops = {
   2927	.ndo_open		= ehea_open,
   2928	.ndo_stop		= ehea_stop,
   2929	.ndo_start_xmit		= ehea_start_xmit,
   2930	.ndo_get_stats64	= ehea_get_stats64,
   2931	.ndo_set_mac_address	= ehea_set_mac_addr,
   2932	.ndo_validate_addr	= eth_validate_addr,
   2933	.ndo_set_rx_mode	= ehea_set_multicast_list,
   2934	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
   2935	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
   2936	.ndo_tx_timeout		= ehea_tx_watchdog,
   2937};
   2938
   2939static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
   2940					 u32 logical_port_id,
   2941					 struct device_node *dn)
   2942{
   2943	int ret;
   2944	struct net_device *dev;
   2945	struct ehea_port *port;
   2946	struct device *port_dev;
   2947	int jumbo;
   2948
   2949	/* allocate memory for the port structures */
   2950	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
   2951
   2952	if (!dev) {
   2953		ret = -ENOMEM;
   2954		goto out_err;
   2955	}
   2956
   2957	port = netdev_priv(dev);
   2958
   2959	mutex_init(&port->port_lock);
   2960	port->state = EHEA_PORT_DOWN;
   2961	port->sig_comp_iv = sq_entries / 10;
   2962
   2963	port->adapter = adapter;
   2964	port->netdev = dev;
   2965	port->logical_port_id = logical_port_id;
   2966
   2967	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
   2968
   2969	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
   2970	if (!port->mc_list) {
   2971		ret = -ENOMEM;
   2972		goto out_free_ethdev;
   2973	}
   2974
   2975	INIT_LIST_HEAD(&port->mc_list->list);
   2976
   2977	ret = ehea_sense_port_attr(port);
   2978	if (ret)
   2979		goto out_free_mc_list;
   2980
   2981	netif_set_real_num_rx_queues(dev, port->num_def_qps);
   2982	netif_set_real_num_tx_queues(dev, port->num_def_qps);
   2983
   2984	port_dev = ehea_register_port(port, dn);
   2985	if (!port_dev)
   2986		goto out_free_mc_list;
   2987
   2988	SET_NETDEV_DEV(dev, port_dev);
   2989
   2990	/* initialize net_device structure */
   2991	eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
   2992
   2993	dev->netdev_ops = &ehea_netdev_ops;
   2994	ehea_set_ethtool_ops(dev);
   2995
   2996	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
   2997		      NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
   2998	dev->features = NETIF_F_SG | NETIF_F_TSO |
   2999		      NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
   3000		      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
   3001		      NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
   3002	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
   3003			NETIF_F_IP_CSUM;
   3004	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
   3005
   3006	/* MTU range: 68 - 9022 */
   3007	dev->min_mtu = ETH_MIN_MTU;
   3008	dev->max_mtu = EHEA_MAX_PACKET_SIZE;
   3009
   3010	INIT_WORK(&port->reset_task, ehea_reset_port);
   3011	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
   3012
   3013	init_waitqueue_head(&port->swqe_avail_wq);
   3014	init_waitqueue_head(&port->restart_wq);
   3015
   3016	ret = register_netdev(dev);
   3017	if (ret) {
   3018		pr_err("register_netdev failed. ret=%d\n", ret);
   3019		goto out_unreg_port;
   3020	}
   3021
   3022	ret = ehea_get_jumboframe_status(port, &jumbo);
   3023	if (ret)
   3024		netdev_err(dev, "failed determining jumbo frame status\n");
   3025
   3026	netdev_info(dev, "Jumbo frames are %sabled\n",
   3027		    jumbo == 1 ? "en" : "dis");
   3028
   3029	adapter->active_ports++;
   3030
   3031	return port;
   3032
   3033out_unreg_port:
   3034	ehea_unregister_port(port);
   3035
   3036out_free_mc_list:
   3037	kfree(port->mc_list);
   3038
   3039out_free_ethdev:
   3040	free_netdev(dev);
   3041
   3042out_err:
   3043	pr_err("setting up logical port with id=%d failed, ret=%d\n",
   3044	       logical_port_id, ret);
   3045	return NULL;
   3046}
   3047
   3048static void ehea_shutdown_single_port(struct ehea_port *port)
   3049{
   3050	struct ehea_adapter *adapter = port->adapter;
   3051
   3052	cancel_work_sync(&port->reset_task);
   3053	cancel_delayed_work_sync(&port->stats_work);
   3054	unregister_netdev(port->netdev);
   3055	ehea_unregister_port(port);
   3056	kfree(port->mc_list);
   3057	free_netdev(port->netdev);
   3058	adapter->active_ports--;
   3059}
   3060
   3061static int ehea_setup_ports(struct ehea_adapter *adapter)
   3062{
   3063	struct device_node *lhea_dn;
   3064	struct device_node *eth_dn = NULL;
   3065
   3066	const u32 *dn_log_port_id;
   3067	int i = 0;
   3068
   3069	lhea_dn = adapter->ofdev->dev.of_node;
   3070	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
   3071
   3072		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
   3073						 NULL);
   3074		if (!dn_log_port_id) {
   3075			pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
   3076			continue;
   3077		}
   3078
   3079		if (ehea_add_adapter_mr(adapter)) {
   3080			pr_err("creating MR failed\n");
   3081			of_node_put(eth_dn);
   3082			return -EIO;
   3083		}
   3084
   3085		adapter->port[i] = ehea_setup_single_port(adapter,
   3086							  *dn_log_port_id,
   3087							  eth_dn);
   3088		if (adapter->port[i])
   3089			netdev_info(adapter->port[i]->netdev,
   3090				    "logical port id #%d\n", *dn_log_port_id);
   3091		else
   3092			ehea_remove_adapter_mr(adapter);
   3093
   3094		i++;
   3095	}
   3096	return 0;
   3097}
   3098
   3099static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
   3100					   u32 logical_port_id)
   3101{
   3102	struct device_node *lhea_dn;
   3103	struct device_node *eth_dn = NULL;
   3104	const u32 *dn_log_port_id;
   3105
   3106	lhea_dn = adapter->ofdev->dev.of_node;
   3107	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
   3108
   3109		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
   3110						 NULL);
   3111		if (dn_log_port_id)
   3112			if (*dn_log_port_id == logical_port_id)
   3113				return eth_dn;
   3114	}
   3115
   3116	return NULL;
   3117}
   3118
   3119static ssize_t probe_port_store(struct device *dev,
   3120			       struct device_attribute *attr,
   3121			       const char *buf, size_t count)
   3122{
   3123	struct ehea_adapter *adapter = dev_get_drvdata(dev);
   3124	struct ehea_port *port;
   3125	struct device_node *eth_dn = NULL;
   3126	int i;
   3127
   3128	u32 logical_port_id;
   3129
   3130	sscanf(buf, "%d", &logical_port_id);
   3131
   3132	port = ehea_get_port(adapter, logical_port_id);
   3133
   3134	if (port) {
   3135		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
   3136			    logical_port_id);
   3137		return -EINVAL;
   3138	}
   3139
   3140	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
   3141
   3142	if (!eth_dn) {
   3143		pr_info("no logical port with id %d found\n", logical_port_id);
   3144		return -EINVAL;
   3145	}
   3146
   3147	if (ehea_add_adapter_mr(adapter)) {
   3148		pr_err("creating MR failed\n");
   3149		of_node_put(eth_dn);
   3150		return -EIO;
   3151	}
   3152
   3153	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
   3154
   3155	of_node_put(eth_dn);
   3156
   3157	if (port) {
   3158		for (i = 0; i < EHEA_MAX_PORTS; i++)
   3159			if (!adapter->port[i]) {
   3160				adapter->port[i] = port;
   3161				break;
   3162			}
   3163
   3164		netdev_info(port->netdev, "added: (logical port id=%d)\n",
   3165			    logical_port_id);
   3166	} else {
   3167		ehea_remove_adapter_mr(adapter);
   3168		return -EIO;
   3169	}
   3170
   3171	return (ssize_t) count;
   3172}
   3173
   3174static ssize_t remove_port_store(struct device *dev,
   3175				 struct device_attribute *attr,
   3176				 const char *buf, size_t count)
   3177{
   3178	struct ehea_adapter *adapter = dev_get_drvdata(dev);
   3179	struct ehea_port *port;
   3180	int i;
   3181	u32 logical_port_id;
   3182
   3183	sscanf(buf, "%d", &logical_port_id);
   3184
   3185	port = ehea_get_port(adapter, logical_port_id);
   3186
   3187	if (port) {
   3188		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
   3189			    logical_port_id);
   3190
   3191		ehea_shutdown_single_port(port);
   3192
   3193		for (i = 0; i < EHEA_MAX_PORTS; i++)
   3194			if (adapter->port[i] == port) {
   3195				adapter->port[i] = NULL;
   3196				break;
   3197			}
   3198	} else {
   3199		pr_err("removing port with logical port id=%d failed. port not configured.\n",
   3200		       logical_port_id);
   3201		return -EINVAL;
   3202	}
   3203
   3204	ehea_remove_adapter_mr(adapter);
   3205
   3206	return (ssize_t) count;
   3207}
   3208
   3209static DEVICE_ATTR_WO(probe_port);
   3210static DEVICE_ATTR_WO(remove_port);
   3211
   3212static int ehea_create_device_sysfs(struct platform_device *dev)
   3213{
   3214	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
   3215	if (ret)
   3216		goto out;
   3217
   3218	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
   3219out:
   3220	return ret;
   3221}
   3222
   3223static void ehea_remove_device_sysfs(struct platform_device *dev)
   3224{
   3225	device_remove_file(&dev->dev, &dev_attr_probe_port);
   3226	device_remove_file(&dev->dev, &dev_attr_remove_port);
   3227}
   3228
   3229static int ehea_reboot_notifier(struct notifier_block *nb,
   3230				unsigned long action, void *unused)
   3231{
   3232	if (action == SYS_RESTART) {
   3233		pr_info("Reboot: freeing all eHEA resources\n");
   3234		ibmebus_unregister_driver(&ehea_driver);
   3235	}
   3236	return NOTIFY_DONE;
   3237}
   3238
   3239static struct notifier_block ehea_reboot_nb = {
   3240	.notifier_call = ehea_reboot_notifier,
   3241};
   3242
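/*
 * Memory hotplug notifier: on online/offline events, update the
 * section bitmap and re-register the MRs under dlpar_mem_lock;
 * NOTIFY_BAD is returned if the bitmap update fails.
 */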
   3243static int ehea_mem_notifier(struct notifier_block *nb,
   3244			     unsigned long action, void *data)
   3245{
   3246	int ret = NOTIFY_BAD;
   3247	struct memory_notify *arg = data;
   3248
   3249	mutex_lock(&dlpar_mem_lock);
   3250
   3251	switch (action) {
   3252	case MEM_CANCEL_OFFLINE:
   3253		pr_info("memory offlining canceled");
   3254		fallthrough;	/* re-add canceled memory block */
   3255
   3256	case MEM_ONLINE:
   3257		pr_info("memory is going online");
   3258		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
   3259		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
   3260			goto out_unlock;
   3261		ehea_rereg_mrs();
   3262		break;
   3263
   3264	case MEM_GOING_OFFLINE:
   3265		pr_info("memory is going offline");
   3266		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
   3267		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
   3268			goto out_unlock;
   3269		ehea_rereg_mrs();
   3270		break;
   3271
   3272	default:
   3273		break;
   3274	}
   3275
   3276	ehea_update_firmware_handles();
   3277	ret = NOTIFY_OK;
   3278
   3279out_unlock:
   3280	mutex_unlock(&dlpar_mem_lock);
   3281	return ret;
   3282}
   3283
   3284static struct notifier_block ehea_mem_nb = {
   3285	.notifier_call = ehea_mem_notifier,
   3286};
   3287
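/*
 * kexec/crash shutdown hook: force-free all recorded firmware handles
 * and deregister all recorded BCMC entries so the kdump kernel does
 * not inherit stale registrations.
 */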
   3288static void ehea_crash_handler(void)
   3289{
   3290	int i;
   3291
   3292	if (ehea_fw_handles.arr)
   3293		for (i = 0; i < ehea_fw_handles.num_entries; i++)
   3294			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
   3295					     ehea_fw_handles.arr[i].fwh,
   3296					     FORCE_FREE);
   3297
   3298	if (ehea_bcmc_regs.arr)
   3299		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
   3300			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
   3301					      ehea_bcmc_regs.arr[i].port_id,
   3302					      ehea_bcmc_regs.arr[i].reg_type,
   3303					      ehea_bcmc_regs.arr[i].macaddr,
   3304					      0, H_DEREG_BCMC);
   3305}
   3306
   3307static atomic_t ehea_memory_hooks_registered;
   3308
   3309/* Register memory hooks on probe of first adapter */
   3310static int ehea_register_memory_hooks(void)
   3311{
   3312	int ret = 0;
   3313
   3314	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
   3315		return 0;
   3316
   3317	ret = ehea_create_busmap();
   3318	if (ret) {
   3319		pr_info("ehea_create_busmap failed\n");
   3320		goto out;
   3321	}
   3322
   3323	ret = register_reboot_notifier(&ehea_reboot_nb);
   3324	if (ret) {
   3325		pr_info("register_reboot_notifier failed\n");
   3326		goto out;
   3327	}
   3328
   3329	ret = register_memory_notifier(&ehea_mem_nb);
   3330	if (ret) {
   3331		pr_info("register_memory_notifier failed\n");
   3332		goto out2;
   3333	}
   3334
   3335	ret = crash_shutdown_register(ehea_crash_handler);
   3336	if (ret) {
   3337		pr_info("crash_shutdown_register failed\n");
   3338		goto out3;
   3339	}
   3340
   3341	return 0;
   3342
   3343out3:
   3344	unregister_memory_notifier(&ehea_mem_nb);
   3345out2:
   3346	unregister_reboot_notifier(&ehea_reboot_nb);
   3347out:
   3348	atomic_dec(&ehea_memory_hooks_registered);
   3349	return ret;
   3350}
   3351
   3352static void ehea_unregister_memory_hooks(void)
   3353{
   3354	/* Only remove the hooks if we've registered them */
   3355	if (atomic_read(&ehea_memory_hooks_registered) == 0)
   3356		return;
   3357
   3358	unregister_reboot_notifier(&ehea_reboot_nb);
   3359	if (crash_shutdown_unregister(ehea_crash_handler))
   3360		pr_info("failed unregistering crash handler\n");
   3361	unregister_memory_notifier(&ehea_mem_nb);
   3362}
   3363
   3364static int ehea_probe_adapter(struct platform_device *dev)
   3365{
   3366	struct ehea_adapter *adapter;
   3367	const u64 *adapter_handle;
   3368	int ret;
   3369	int i;
   3370
   3371	ret = ehea_register_memory_hooks();
   3372	if (ret)
   3373		return ret;
   3374
   3375	if (!dev || !dev->dev.of_node) {
   3376		pr_err("Invalid ibmebus device probed\n");
   3377		return -EINVAL;
   3378	}
   3379
   3380	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
   3381	if (!adapter) {
   3382		ret = -ENOMEM;
   3383		dev_err(&dev->dev, "no mem for ehea_adapter\n");
   3384		goto out;
   3385	}
   3386
   3387	list_add(&adapter->list, &adapter_list);
   3388
   3389	adapter->ofdev = dev;
   3390
   3391	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
   3392					 NULL);
   3393	if (adapter_handle)
   3394		adapter->handle = *adapter_handle;
   3395
   3396	if (!adapter->handle) {
   3397		dev_err(&dev->dev, "failed getting handle for adapter"
   3398			" '%pOF'\n", dev->dev.of_node);
   3399		ret = -ENODEV;
   3400		goto out_free_ad;
   3401	}
   3402
   3403	adapter->pd = EHEA_PD_ID;
   3404
   3405	platform_set_drvdata(dev, adapter);
   3406
   3408	/* initialize adapter and ports */
   3409	/* get adapter properties */
   3410	ret = ehea_sense_adapter_attr(adapter);
   3411	if (ret) {
   3412		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
   3413		goto out_free_ad;
   3414	}
   3415
   3416	adapter->neq = ehea_create_eq(adapter,
   3417				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
   3418	if (!adapter->neq) {
   3419		ret = -EIO;
   3420		dev_err(&dev->dev, "NEQ creation failed\n");
   3421		goto out_free_ad;
   3422	}
   3423
   3424	tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
   3425
   3426	ret = ehea_create_device_sysfs(dev);
   3427	if (ret)
   3428		goto out_kill_eq;
   3429
   3430	ret = ehea_setup_ports(adapter);
   3431	if (ret) {
   3432		dev_err(&dev->dev, "setup_ports failed\n");
   3433		goto out_rem_dev_sysfs;
   3434	}
   3435
   3436	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
   3437				  ehea_interrupt_neq, 0,
   3438				  "ehea_neq", adapter);
   3439	if (ret) {
   3440		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
   3441		goto out_shutdown_ports;
   3442	}
   3443
   3444	/* Handle any events that might be pending. */
   3445	tasklet_hi_schedule(&adapter->neq_tasklet);
   3446
   3447	ret = 0;
   3448	goto out;
   3449
   3450out_shutdown_ports:
   3451	for (i = 0; i < EHEA_MAX_PORTS; i++)
   3452		if (adapter->port[i]) {
   3453			ehea_shutdown_single_port(adapter->port[i]);
   3454			adapter->port[i] = NULL;
   3455		}
   3456
   3457out_rem_dev_sysfs:
   3458	ehea_remove_device_sysfs(dev);
   3459
   3460out_kill_eq:
   3461	ehea_destroy_eq(adapter->neq);
   3462
   3463out_free_ad:
   3464	list_del(&adapter->list);
   3465
   3466out:
   3467	ehea_update_firmware_handles();
   3468
   3469	return ret;
   3470}
   3471
   3472static int ehea_remove(struct platform_device *dev)
   3473{
   3474	struct ehea_adapter *adapter = platform_get_drvdata(dev);
   3475	int i;
   3476
   3477	for (i = 0; i < EHEA_MAX_PORTS; i++)
   3478		if (adapter->port[i]) {
   3479			ehea_shutdown_single_port(adapter->port[i]);
   3480			adapter->port[i] = NULL;
   3481		}
   3482
   3483	ehea_remove_device_sysfs(dev);
   3484
   3485	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
   3486	tasklet_kill(&adapter->neq_tasklet);
   3487
   3488	ehea_destroy_eq(adapter->neq);
   3489	ehea_remove_adapter_mr(adapter);
   3490	list_del(&adapter->list);
   3491
   3492	ehea_update_firmware_handles();
   3493
   3494	return 0;
   3495}
   3496
   3497static int check_module_parm(void)
   3498{
   3499	int ret = 0;
   3500
   3501	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
   3502	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
   3503		pr_info("Bad parameter: rq1_entries\n");
   3504		ret = -EINVAL;
   3505	}
   3506	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
   3507	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
   3508		pr_info("Bad parameter: rq2_entries\n");
   3509		ret = -EINVAL;
   3510	}
   3511	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
   3512	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
   3513		pr_info("Bad parameter: rq3_entries\n");
   3514		ret = -EINVAL;
   3515	}
   3516	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
   3517	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
   3518		pr_info("Bad parameter: sq_entries\n");
   3519		ret = -EINVAL;
   3520	}
   3521
   3522	return ret;
   3523}
   3524
   3525static ssize_t capabilities_show(struct device_driver *drv, char *buf)
   3526{
   3527	return sprintf(buf, "%d", EHEA_CAPABILITIES);
   3528}
   3529
   3530static DRIVER_ATTR_RO(capabilities);
   3531
   3532static int __init ehea_module_init(void)
   3533{
   3534	int ret;
   3535
   3536	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
   3537
   3538	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
   3539	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
   3540
   3541	mutex_init(&ehea_fw_handles.lock);
   3542	spin_lock_init(&ehea_bcmc_regs.lock);
   3543
   3544	ret = check_module_parm();
   3545	if (ret)
   3546		goto out;
   3547
   3548	ret = ibmebus_register_driver(&ehea_driver);
   3549	if (ret) {
   3550		pr_err("failed registering eHEA device driver on ebus\n");
   3551		goto out;
   3552	}
   3553
   3554	ret = driver_create_file(&ehea_driver.driver,
   3555				 &driver_attr_capabilities);
   3556	if (ret) {
   3557		pr_err("failed to register capabilities attribute, ret=%d\n",
   3558		       ret);
   3559		goto out2;
   3560	}
   3561
   3562	return ret;
   3563
   3564out2:
   3565	ibmebus_unregister_driver(&ehea_driver);
   3566out:
   3567	return ret;
   3568}
   3569
   3570static void __exit ehea_module_exit(void)
   3571{
   3572	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
   3573	ibmebus_unregister_driver(&ehea_driver);
   3574	ehea_unregister_memory_hooks();
   3575	kfree(ehea_fw_handles.arr);
   3576	kfree(ehea_bcmc_regs.arr);
   3577	ehea_destroy_busmap();
   3578}
   3579
   3580module_init(ehea_module_init);
   3581module_exit(ehea_module_exit);