cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qeth_core_main.c (192398B)


// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

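/*
 * Resize the inbound buffer pool in two phases: shrinking frees entries
 * straight from the pool, while growing first collects the new entries on
 * a local list so that an allocation failure can be unwound without
 * leaving the pool half-resized. Only when all allocations have succeeded
 * is the local list spliced into the pool.
 */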
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
	}
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

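/*
 * Map a TX buffer's completion code (sbalf15 -- presumably the SBAL flags
 * of element 15; the async-completion path further below passes the
 * QAOB's aorc field instead) to an af_iucv TX notification: 0 means
 * delivered, 4/16/17/18 mean the target was unreachable, and anything
 * else is reported as a general error. "delayed" selects the
 * TX_NOTIFY_DELAYED_* variants for completions that arrive only after
 * the buffer was handed over to the hardware.
 */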
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32)virt_to_phys(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

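/*
 * Check a control-channel buffer for an IDX TERMINATE indication.
 * Returns 0 for a normal buffer, -EPROTONOSUPPORT when the cause code
 * signals a transport-mode mismatch, and -EIO for any other termination
 * cause.
 */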
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

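/*
 * Completion callback for the long-running READ ccw. The buffer is first
 * vetted for an IDX TERMINATE, then unsolicited IPA events are filtered
 * out via qeth_check_ipa_data(). Anything left is matched against the
 * pending requests on cmd_waiter_list and fed to the requestor's reply
 * callback. In all non-fatal paths the next READ is issued before the
 * buffer's reference is dropped.
 */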
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

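/*
 * Try to claim a requested thread: returns 1 when the caller may run it
 * (the start bit is consumed and the running bit set), 0 when the thread
 * was not requested at all, and -EPERM when it was requested but is not
 * currently allowed or is already running.
 */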
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

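/*
 * Interrupt handler shared by all three ccw devices of a card. The
 * channel is identified by comparing the interrupting device against the
 * card's read/write/data devices; a non-zero intparm must point at the
 * command that is currently active on that channel, otherwise the active
 * command is cancelled with -EIO.
 */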
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

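/*
 * Reap TX buffers that complete asynchronously via a QAOB. With
 * drain == true every pending buffer is torn down unconditionally and its
 * sockets are notified with TX_NOTIFY_GENERALERROR; otherwise only
 * buffers whose QAOB has reached QETH_QAOB_DONE are completed, with the
 * notification derived from the QAOB's return code (aorc).
 */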
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

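/*
 * Under z/VM, query CP via DIAG 0x26c for the protocol that the VNIC is
 * coupled with, so the matching discipline (layer 2 vs. layer 3) can be
 * picked automatically. Any failure leaves the discipline undetermined.
 */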
   1745static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
   1746{
   1747	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
   1748	struct diag26c_vnic_resp *response = NULL;
   1749	struct diag26c_vnic_req *request = NULL;
   1750	struct ccw_dev_id id;
   1751	char userid[80];
   1752	int rc = 0;
   1753
   1754	QETH_CARD_TEXT(card, 2, "vmlayer");
   1755
   1756	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
   1757	if (rc)
   1758		goto out;
   1759
   1760	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
   1761	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
   1762	if (!request || !response) {
   1763		rc = -ENOMEM;
   1764		goto out;
   1765	}
   1766
   1767	ccw_device_get_id(CARD_RDEV(card), &id);
   1768	request->resp_buf_len = sizeof(*response);
   1769	request->resp_version = DIAG26C_VERSION6_VM65918;
   1770	request->req_format = DIAG26C_VNIC_INFO;
   1771	ASCEBC(userid, 8);
   1772	memcpy(&request->sys_name, userid, 8);
   1773	request->devno = id.devno;
   1774
   1775	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
   1776	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
   1777	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
   1778	if (rc)
   1779		goto out;
   1780	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
   1781
   1782	if (request->resp_buf_len < sizeof(*response) ||
   1783	    response->version != request->resp_version) {
   1784		rc = -EIO;
   1785		goto out;
   1786	}
   1787
   1788	if (response->protocol == VNIC_INFO_PROT_L2)
   1789		disc = QETH_DISCIPLINE_LAYER2;
   1790	else if (response->protocol == VNIC_INFO_PROT_L3)
   1791		disc = QETH_DISCIPLINE_LAYER3;
   1792
   1793out:
   1794	kfree(response);
   1795	kfree(request);
   1796	if (rc)
   1797		QETH_CARD_TEXT_(card, 2, "err%x", rc);
   1798	return disc;
   1799}
   1800
   1801/* Determine whether the device requires a specific layer discipline */
   1802static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
   1803{
   1804	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
   1805
   1806	if (IS_OSM(card))
   1807		disc = QETH_DISCIPLINE_LAYER2;
   1808	else if (IS_VM_NIC(card))
   1809		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
   1810				      qeth_vm_detect_layer(card);
   1811
   1812	switch (disc) {
   1813	case QETH_DISCIPLINE_LAYER2:
   1814		QETH_CARD_TEXT(card, 3, "force l2");
   1815		break;
   1816	case QETH_DISCIPLINE_LAYER3:
   1817		QETH_CARD_TEXT(card, 3, "force l3");
   1818		break;
   1819	default:
   1820		QETH_CARD_TEXT(card, 3, "force no");
   1821	}
   1822
   1823	return disc;
   1824}
   1825
   1826static void qeth_set_blkt_defaults(struct qeth_card *card)
   1827{
   1828	QETH_CARD_TEXT(card, 2, "cfgblkt");
   1829
   1830	if (card->info.use_v1_blkt) {
   1831		card->info.blkt.time_total = 0;
   1832		card->info.blkt.inter_packet = 0;
   1833		card->info.blkt.inter_packet_jumbo = 0;
   1834	} else {
   1835		card->info.blkt.time_total = 250;
   1836		card->info.blkt.inter_packet = 5;
   1837		card->info.blkt.inter_packet_jumbo = 15;
   1838	}
   1839}
   1840
   1841static void qeth_idx_init(struct qeth_card *card)
   1842{
   1843	memset(&card->seqno, 0, sizeof(card->seqno));
   1844
   1845	card->token.issuer_rm_w = 0x00010103UL;
   1846	card->token.cm_filter_w = 0x00010108UL;
   1847	card->token.cm_connection_w = 0x0001010aUL;
   1848	card->token.ulp_filter_w = 0x0001010bUL;
   1849	card->token.ulp_connection_w = 0x0001010dUL;
   1850
   1851	switch (card->info.type) {
   1852	case QETH_CARD_TYPE_IQD:
    1853		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
   1854		break;
   1855	case QETH_CARD_TYPE_OSD:
   1856		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
   1857		break;
   1858	default:
   1859		break;
   1860	}
   1861}
   1862
   1863static void qeth_idx_finalize_cmd(struct qeth_card *card,
   1864				  struct qeth_cmd_buffer *iob)
   1865{
   1866	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
   1867	       QETH_SEQ_NO_LENGTH);
   1868	if (iob->channel == &card->write)
   1869		card->seqno.trans_hdr++;
   1870}
   1871
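        /*
         * Derive the function level that the peer is expected to report for
         * a given local level: e.g. 0x4108 maps to 0x0408 (first test) and
         * 0x0101 maps to 0x0201 (second test); any other level must match
         * verbatim.
         */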
   1872static int qeth_peer_func_level(int level)
   1873{
   1874	if ((level & 0xff) == 8)
   1875		return (level & 0xff) + 0x400;
   1876	if (((level >> 8) & 3) == 1)
   1877		return (level & 0xff) + 0x200;
   1878	return level;
   1879}
   1880
   1881static void qeth_mpc_finalize_cmd(struct qeth_card *card,
   1882				  struct qeth_cmd_buffer *iob)
   1883{
   1884	qeth_idx_finalize_cmd(card, iob);
   1885
   1886	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
   1887	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
   1888	card->seqno.pdu_hdr++;
   1889	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
   1890	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
   1891
   1892	iob->callback = qeth_release_buffer_cb;
   1893}
   1894
   1895static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
   1896				 struct qeth_cmd_buffer *reply)
   1897{
   1898	/* MPC cmds are issued strictly in sequence. */
   1899	return !IS_IPA(reply->data);
   1900}
   1901
   1902static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
   1903						  const void *data,
   1904						  unsigned int data_length)
   1905{
   1906	struct qeth_cmd_buffer *iob;
   1907
   1908	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
   1909	if (!iob)
   1910		return NULL;
   1911
   1912	memcpy(iob->data, data, data_length);
   1913	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
   1914		       iob->data);
   1915	iob->finalize = qeth_mpc_finalize_cmd;
   1916	iob->match = qeth_mpc_match_reply;
   1917	return iob;
   1918}
   1919
   1920/**
   1921 * qeth_send_control_data() -	send control command to the card
   1922 * @card:			qeth_card structure pointer
   1923 * @iob:			qeth_cmd_buffer pointer
   1924 * @reply_cb:			callback function pointer
   1925 *  cb_card:			pointer to the qeth_card structure
   1926 *  cb_reply:			pointer to the qeth_reply structure
   1927 *  cb_cmd:			pointer to the original iob for non-IPA
   1928 *				commands, or to the qeth_ipa_cmd structure
   1929 *				for the IPA commands.
   1930 * @reply_param:		private pointer passed to the callback
   1931 *
   1932 * Callback function gets called one or more times, with cb_cmd
   1933 * pointing to the response returned by the hardware. Callback
   1934 * function must return
   1935 *   > 0 if more reply blocks are expected,
   1936 *     0 if the last or only reply block is received, and
   1937 *   < 0 on error.
   1938 * Callback function can get the value of the reply_param pointer from the
   1939 * field 'param' of the structure qeth_reply.
   1940 */
   1941
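        /*
         * A minimal sketch of a conforming reply callback; the helper name
         * is hypothetical and the copied field merely mirrors the callbacks
         * further below:
         *
         *	static int example_reply_cb(struct qeth_card *card,
         *				    struct qeth_reply *reply,
         *				    unsigned long data)
         *	{
         *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
         *
         *		memcpy(reply->param,
         *		       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
         *		       QETH_MPC_TOKEN_LENGTH);
         *		return 0;	(0: last or only reply block)
         *	}
         */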
   1942static int qeth_send_control_data(struct qeth_card *card,
   1943				  struct qeth_cmd_buffer *iob,
   1944				  int (*reply_cb)(struct qeth_card *cb_card,
   1945						  struct qeth_reply *cb_reply,
   1946						  unsigned long cb_cmd),
   1947				  void *reply_param)
   1948{
   1949	struct qeth_channel *channel = iob->channel;
   1950	struct qeth_reply *reply = &iob->reply;
   1951	long timeout = iob->timeout;
   1952	int rc;
   1953
   1954	QETH_CARD_TEXT(card, 2, "sendctl");
   1955
   1956	reply->callback = reply_cb;
   1957	reply->param = reply_param;
   1958
   1959	timeout = wait_event_interruptible_timeout(card->wait_q,
   1960						   qeth_trylock_channel(channel, iob),
   1961						   timeout);
   1962	if (timeout <= 0) {
   1963		qeth_put_cmd(iob);
   1964		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
   1965	}
   1966
   1967	if (iob->finalize)
   1968		iob->finalize(card, iob);
   1969	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
   1970
   1971	qeth_enqueue_cmd(card, iob);
   1972
   1973	/* This pairs with iob->callback, and keeps the iob alive after IO: */
   1974	qeth_get_cmd(iob);
   1975
   1976	QETH_CARD_TEXT(card, 6, "noirqpnd");
   1977	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
   1978	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
   1979				      (addr_t) iob, 0, 0, timeout);
   1980	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
   1981	if (rc) {
   1982		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
   1983				 CARD_DEVID(card), rc);
   1984		QETH_CARD_TEXT_(card, 2, " err%d", rc);
   1985		qeth_dequeue_cmd(card, iob);
   1986		qeth_put_cmd(iob);
   1987		qeth_unlock_channel(card, channel);
   1988		goto out;
   1989	}
   1990
   1991	timeout = wait_for_completion_interruptible_timeout(&iob->done,
   1992							    timeout);
   1993	if (timeout <= 0)
   1994		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
   1995
   1996	qeth_dequeue_cmd(card, iob);
   1997
   1998	if (reply_cb) {
   1999		/* Wait until the callback for a late reply has completed: */
   2000		spin_lock_irq(&iob->lock);
   2001		if (rc)
   2002			/* Zap any callback that's still pending: */
   2003			iob->rc = rc;
   2004		spin_unlock_irq(&iob->lock);
   2005	}
   2006
   2007	if (!rc)
   2008		rc = iob->rc;
   2009
   2010out:
   2011	qeth_put_cmd(iob);
   2012	return rc;
   2013}
   2014
   2015struct qeth_node_desc {
   2016	struct node_descriptor nd1;
   2017	struct node_descriptor nd2;
   2018	struct node_descriptor nd3;
   2019};
   2020
   2021static void qeth_read_conf_data_cb(struct qeth_card *card,
   2022				   struct qeth_cmd_buffer *iob,
   2023				   unsigned int data_length)
   2024{
   2025	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
   2026	int rc = 0;
   2027	u8 *tag;
   2028
   2029	QETH_CARD_TEXT(card, 2, "cfgunit");
   2030
   2031	if (data_length < sizeof(*nd)) {
   2032		rc = -EINVAL;
   2033		goto out;
   2034	}
   2035
   2036	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
   2037			       nd->nd1.plant[1] == _ascebc['M'];
   2038	tag = (u8 *)&nd->nd1.tag;
   2039	card->info.chpid = tag[0];
   2040	card->info.unit_addr2 = tag[1];
   2041
   2042	tag = (u8 *)&nd->nd2.tag;
   2043	card->info.cula = tag[1];
   2044
   2045	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
   2046				 nd->nd3.model[1] == 0xF0 &&
   2047				 nd->nd3.model[2] >= 0xF1 &&
   2048				 nd->nd3.model[2] <= 0xF4;
   2049
   2050out:
   2051	qeth_notify_cmd(iob, rc);
   2052	qeth_put_cmd(iob);
   2053}
   2054
   2055static int qeth_read_conf_data(struct qeth_card *card)
   2056{
   2057	struct qeth_channel *channel = &card->data;
   2058	struct qeth_cmd_buffer *iob;
   2059	struct ciw *ciw;
   2060
   2061	/* scan for RCD command in extended SenseID data */
   2062	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
   2063	if (!ciw || ciw->cmd == 0)
   2064		return -EOPNOTSUPP;
   2065	if (ciw->count < sizeof(struct qeth_node_desc))
   2066		return -EINVAL;
   2067
   2068	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
   2069	if (!iob)
   2070		return -ENOMEM;
   2071
   2072	iob->callback = qeth_read_conf_data_cb;
   2073	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
   2074		       iob->data);
   2075
   2076	return qeth_send_control_data(card, iob, NULL, NULL);
   2077}
   2078
   2079static int qeth_idx_check_activate_response(struct qeth_card *card,
   2080					    struct qeth_channel *channel,
   2081					    struct qeth_cmd_buffer *iob)
   2082{
   2083	int rc;
   2084
   2085	rc = qeth_check_idx_response(card, iob->data);
   2086	if (rc)
   2087		return rc;
   2088
   2089	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
   2090		return 0;
   2091
   2092	/* negative reply: */
   2093	QETH_CARD_TEXT_(card, 2, "idxneg%c",
   2094			QETH_IDX_ACT_CAUSE_CODE(iob->data));
   2095
   2096	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
   2097	case QETH_IDX_ACT_ERR_EXCL:
   2098		dev_err(&channel->ccwdev->dev,
   2099			"The adapter is used exclusively by another host\n");
   2100		return -EBUSY;
   2101	case QETH_IDX_ACT_ERR_AUTH:
   2102	case QETH_IDX_ACT_ERR_AUTH_USER:
   2103		dev_err(&channel->ccwdev->dev,
   2104			"Setting the device online failed because of insufficient authorization\n");
   2105		return -EPERM;
   2106	default:
   2107		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
   2108				 CCW_DEVID(channel->ccwdev));
   2109		return -EIO;
   2110	}
   2111}
   2112
   2113static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
   2114					      struct qeth_cmd_buffer *iob,
   2115					      unsigned int data_length)
   2116{
   2117	struct qeth_channel *channel = iob->channel;
   2118	u16 peer_level;
   2119	int rc;
   2120
   2121	QETH_CARD_TEXT(card, 2, "idxrdcb");
   2122
   2123	rc = qeth_idx_check_activate_response(card, channel, iob);
   2124	if (rc)
   2125		goto out;
   2126
   2127	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
   2128	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
   2129		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
   2130				 CCW_DEVID(channel->ccwdev),
   2131				 card->info.func_level, peer_level);
   2132		rc = -EINVAL;
   2133		goto out;
   2134	}
   2135
   2136	memcpy(&card->token.issuer_rm_r,
   2137	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
   2138	       QETH_MPC_TOKEN_LENGTH);
   2139	memcpy(&card->info.mcl_level[0],
   2140	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
   2141
   2142out:
   2143	qeth_notify_cmd(iob, rc);
   2144	qeth_put_cmd(iob);
   2145}
   2146
   2147static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
   2148					       struct qeth_cmd_buffer *iob,
   2149					       unsigned int data_length)
   2150{
   2151	struct qeth_channel *channel = iob->channel;
   2152	u16 peer_level;
   2153	int rc;
   2154
   2155	QETH_CARD_TEXT(card, 2, "idxwrcb");
   2156
   2157	rc = qeth_idx_check_activate_response(card, channel, iob);
   2158	if (rc)
   2159		goto out;
   2160
   2161	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
   2162	if ((peer_level & ~0x0100) !=
   2163	    qeth_peer_func_level(card->info.func_level)) {
   2164		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
   2165				 CCW_DEVID(channel->ccwdev),
   2166				 card->info.func_level, peer_level);
   2167		rc = -EINVAL;
   2168	}
   2169
   2170out:
   2171	qeth_notify_cmd(iob, rc);
   2172	qeth_put_cmd(iob);
   2173}
   2174
   2175static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
   2176					struct qeth_cmd_buffer *iob)
   2177{
   2178	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
   2179	u8 port = ((u8)card->dev->dev_port) | 0x80;
   2180	struct ccw1 *ccw = __ccw_from_cmd(iob);
   2181
   2182	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
   2183		       iob->data);
   2184	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
   2185	iob->finalize = qeth_idx_finalize_cmd;
   2186
   2187	port |= QETH_IDX_ACT_INVAL_FRAME;
   2188	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
   2189	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
   2190	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
   2191	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
   2192	       &card->info.func_level, 2);
   2193	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
   2194	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
   2195}
   2196
   2197static int qeth_idx_activate_read_channel(struct qeth_card *card)
   2198{
   2199	struct qeth_channel *channel = &card->read;
   2200	struct qeth_cmd_buffer *iob;
   2201	int rc;
   2202
   2203	QETH_CARD_TEXT(card, 2, "idxread");
   2204
   2205	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
   2206	if (!iob)
   2207		return -ENOMEM;
   2208
   2209	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
   2210	qeth_idx_setup_activate_cmd(card, iob);
   2211	iob->callback = qeth_idx_activate_read_channel_cb;
   2212
   2213	rc = qeth_send_control_data(card, iob, NULL, NULL);
   2214	if (rc)
   2215		return rc;
   2216
   2217	channel->state = CH_STATE_UP;
   2218	return 0;
   2219}
   2220
   2221static int qeth_idx_activate_write_channel(struct qeth_card *card)
   2222{
   2223	struct qeth_channel *channel = &card->write;
   2224	struct qeth_cmd_buffer *iob;
   2225	int rc;
   2226
   2227	QETH_CARD_TEXT(card, 2, "idxwrite");
   2228
   2229	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
   2230	if (!iob)
   2231		return -ENOMEM;
   2232
   2233	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
   2234	qeth_idx_setup_activate_cmd(card, iob);
   2235	iob->callback = qeth_idx_activate_write_channel_cb;
   2236
   2237	rc = qeth_send_control_data(card, iob, NULL, NULL);
   2238	if (rc)
   2239		return rc;
   2240
   2241	channel->state = CH_STATE_UP;
   2242	return 0;
   2243}
   2244
   2245static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
   2246		unsigned long data)
   2247{
   2248	struct qeth_cmd_buffer *iob;
   2249
   2250	QETH_CARD_TEXT(card, 2, "cmenblcb");
   2251
   2252	iob = (struct qeth_cmd_buffer *) data;
   2253	memcpy(&card->token.cm_filter_r,
   2254	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
   2255	       QETH_MPC_TOKEN_LENGTH);
   2256	return 0;
   2257}
   2258
   2259static int qeth_cm_enable(struct qeth_card *card)
   2260{
   2261	struct qeth_cmd_buffer *iob;
   2262
   2263	QETH_CARD_TEXT(card, 2, "cmenable");
   2264
   2265	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
   2266	if (!iob)
   2267		return -ENOMEM;
   2268
   2269	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
   2270	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
   2271	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
   2272	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
   2273
   2274	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
   2275}
   2276
   2277static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
   2278		unsigned long data)
   2279{
   2280	struct qeth_cmd_buffer *iob;
   2281
   2282	QETH_CARD_TEXT(card, 2, "cmsetpcb");
   2283
   2284	iob = (struct qeth_cmd_buffer *) data;
   2285	memcpy(&card->token.cm_connection_r,
   2286	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
   2287	       QETH_MPC_TOKEN_LENGTH);
   2288	return 0;
   2289}
   2290
   2291static int qeth_cm_setup(struct qeth_card *card)
   2292{
   2293	struct qeth_cmd_buffer *iob;
   2294
   2295	QETH_CARD_TEXT(card, 2, "cmsetup");
   2296
   2297	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
   2298	if (!iob)
   2299		return -ENOMEM;
   2300
   2301	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
   2302	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
   2303	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
   2304	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
   2305	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
   2306	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
   2307	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
   2308}
   2309
   2310static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
   2311{
   2312	if (link_type == QETH_LINK_TYPE_LANE_TR ||
   2313	    link_type == QETH_LINK_TYPE_HSTR) {
   2314		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
   2315		return false;
   2316	}
   2317
   2318	return true;
   2319}
   2320
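        /*
         * On IQD the RX buffer size is derived from the max MTU, so an
         * accurate value is mandatory and any change forces the qdio queues
         * to be re-allocated.  On other card types a reported max MTU of 0
         * is tolerated and treated as ETH_MAX_MTU.
         */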
   2321static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
   2322{
   2323	struct net_device *dev = card->dev;
   2324	unsigned int new_mtu;
   2325
   2326	if (!max_mtu) {
   2327		/* IQD needs accurate max MTU to set up its RX buffers: */
   2328		if (IS_IQD(card))
   2329			return -EINVAL;
   2330		/* tolerate quirky HW: */
   2331		max_mtu = ETH_MAX_MTU;
   2332	}
   2333
   2334	rtnl_lock();
   2335	if (IS_IQD(card)) {
   2336		/* move any device with default MTU to new max MTU: */
   2337		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
   2338
   2339		/* adjust RX buffer size to new max MTU: */
   2340		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
   2341		if (dev->max_mtu && dev->max_mtu != max_mtu)
   2342			qeth_free_qdio_queues(card);
   2343	} else {
   2344		if (dev->mtu)
   2345			new_mtu = dev->mtu;
   2346		/* default MTUs for first setup: */
   2347		else if (IS_LAYER2(card))
   2348			new_mtu = ETH_DATA_LEN;
   2349		else
   2350			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
   2351	}
   2352
   2353	dev->max_mtu = max_mtu;
   2354	dev->mtu = min(new_mtu, max_mtu);
   2355	rtnl_unlock();
   2356	return 0;
   2357}
   2358
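        /*
         * Translate the HiperSockets frame-size code reported by ULP_ENABLE
         * into an MTU; in each case the MTU is roughly 8KB less than the
         * frame size (e.g. 0x4000 = 16KB yields an MTU of 8192).  Unknown
         * codes map to 0 and are rejected by qeth_update_max_mtu().
         */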
   2359static int qeth_get_mtu_outof_framesize(int framesize)
   2360{
   2361	switch (framesize) {
   2362	case 0x4000:
   2363		return 8192;
   2364	case 0x6000:
   2365		return 16384;
   2366	case 0xa000:
   2367		return 32768;
   2368	case 0xffff:
   2369		return 57344;
   2370	default:
   2371		return 0;
   2372	}
   2373}
   2374
   2375static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
   2376		unsigned long data)
   2377{
   2378	__u16 mtu, framesize;
   2379	__u16 len;
   2380	struct qeth_cmd_buffer *iob;
   2381	u8 link_type = 0;
   2382
   2383	QETH_CARD_TEXT(card, 2, "ulpenacb");
   2384
   2385	iob = (struct qeth_cmd_buffer *) data;
   2386	memcpy(&card->token.ulp_filter_r,
   2387	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
   2388	       QETH_MPC_TOKEN_LENGTH);
   2389	if (IS_IQD(card)) {
   2390		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
   2391		mtu = qeth_get_mtu_outof_framesize(framesize);
   2392	} else {
   2393		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
   2394	}
   2395	*(u16 *)reply->param = mtu;
   2396
   2397	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
   2398	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
   2399		memcpy(&link_type,
   2400		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
   2401		if (!qeth_is_supported_link_type(card, link_type))
   2402			return -EPROTONOSUPPORT;
   2403	}
   2404
   2405	card->info.link_type = link_type;
   2406	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
   2407	return 0;
   2408}
   2409
   2410static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
   2411{
   2412	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
   2413}
   2414
   2415static int qeth_ulp_enable(struct qeth_card *card)
   2416{
   2417	u8 prot_type = qeth_mpc_select_prot_type(card);
   2418	struct qeth_cmd_buffer *iob;
   2419	u16 max_mtu;
   2420	int rc;
   2421
   2422	QETH_CARD_TEXT(card, 2, "ulpenabl");
   2423
   2424	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
   2425	if (!iob)
   2426		return -ENOMEM;
   2427
   2428	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
   2429	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
   2430	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
   2431	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
   2432	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
   2433	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
   2434	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
   2435	if (rc)
   2436		return rc;
   2437	return qeth_update_max_mtu(card, max_mtu);
   2438}
   2439
   2440static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
   2441		unsigned long data)
   2442{
   2443	struct qeth_cmd_buffer *iob;
   2444
   2445	QETH_CARD_TEXT(card, 2, "ulpstpcb");
   2446
   2447	iob = (struct qeth_cmd_buffer *) data;
   2448	memcpy(&card->token.ulp_connection_r,
   2449	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
   2450	       QETH_MPC_TOKEN_LENGTH);
   2451	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
   2452		     3)) {
   2453		QETH_CARD_TEXT(card, 2, "olmlimit");
    2454		dev_err(&card->gdev->dev,
    2455			"A connection could not be established because of an OLM limit\n");
   2456		return -EMLINK;
   2457	}
   2458	return 0;
   2459}
   2460
   2461static int qeth_ulp_setup(struct qeth_card *card)
   2462{
   2463	__u16 temp;
   2464	struct qeth_cmd_buffer *iob;
   2465
   2466	QETH_CARD_TEXT(card, 2, "ulpsetup");
   2467
   2468	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
   2469	if (!iob)
   2470		return -ENOMEM;
   2471
   2472	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
   2473	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
   2474	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
   2475	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
   2476	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
   2477	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
   2478
   2479	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
   2480	temp = (card->info.cula << 8) + card->info.unit_addr2;
   2481	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
   2482	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
   2483}
   2484
   2485static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
   2486			      gfp_t gfp)
   2487{
   2488	struct qeth_qdio_out_buffer *newbuf;
   2489
   2490	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
   2491	if (!newbuf)
   2492		return -ENOMEM;
   2493
   2494	newbuf->buffer = q->qdio_bufs[bidx];
   2495	skb_queue_head_init(&newbuf->skb_list);
   2496	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
   2497	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
   2498	q->bufs[bidx] = newbuf;
   2499	return 0;
   2500}
   2501
   2502static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
   2503{
   2504	if (!q)
   2505		return;
   2506
   2507	qeth_drain_output_queue(q, true);
   2508	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
   2509	kfree(q);
   2510}
   2511
   2512static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
   2513{
   2514	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
   2515	unsigned int i;
   2516
   2517	if (!q)
   2518		return NULL;
   2519
   2520	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
   2521		goto err_qdio_bufs;
   2522
   2523	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
   2524		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
   2525			goto err_out_bufs;
   2526	}
   2527
   2528	return q;
   2529
   2530err_out_bufs:
   2531	while (i > 0)
   2532		qeth_free_out_buf(q->bufs[--i]);
   2533	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
   2534err_qdio_bufs:
   2535	kfree(q);
   2536	return NULL;
   2537}
   2538
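        /*
         * TX completion timer: kick the queue's NAPI instance so completed
         * buffers are rescanned even when no new transmission would trigger
         * a poll, and account the event in the queue's statistics.
         */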
   2539static void qeth_tx_completion_timer(struct timer_list *timer)
   2540{
   2541	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
   2542
   2543	napi_schedule(&queue->napi);
   2544	QETH_TXQ_STAT_INC(queue, completion_timer);
   2545}
   2546
   2547static int qeth_alloc_qdio_queues(struct qeth_card *card)
   2548{
   2549	unsigned int i;
   2550
   2551	QETH_CARD_TEXT(card, 2, "allcqdbf");
   2552
   2553	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
   2554		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
   2555		return 0;
   2556
   2557	/* inbound buffer pool */
   2558	if (qeth_alloc_buffer_pool(card))
   2559		goto out_buffer_pool;
   2560
   2561	/* outbound */
   2562	for (i = 0; i < card->qdio.no_out_queues; ++i) {
   2563		struct qeth_qdio_out_q *queue;
   2564
   2565		queue = qeth_alloc_output_queue();
   2566		if (!queue)
   2567			goto out_freeoutq;
   2568		QETH_CARD_TEXT_(card, 2, "outq %i", i);
   2569		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
   2570		card->qdio.out_qs[i] = queue;
   2571		queue->card = card;
   2572		queue->queue_no = i;
   2573		INIT_LIST_HEAD(&queue->pending_bufs);
   2574		spin_lock_init(&queue->lock);
   2575		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
   2576		if (IS_IQD(card)) {
   2577			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
   2578			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
   2579			queue->rescan_usecs = QETH_TX_TIMER_USECS;
   2580		} else {
   2581			queue->coalesce_usecs = USEC_PER_SEC;
   2582			queue->max_coalesced_frames = 0;
   2583			queue->rescan_usecs = 10 * USEC_PER_SEC;
   2584		}
   2585		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
   2586	}
   2587
   2588	/* completion */
   2589	if (qeth_alloc_cq(card))
   2590		goto out_freeoutq;
   2591
   2592	return 0;
   2593
   2594out_freeoutq:
   2595	while (i > 0) {
   2596		qeth_free_output_queue(card->qdio.out_qs[--i]);
   2597		card->qdio.out_qs[i] = NULL;
   2598	}
   2599	qeth_free_buffer_pool(card);
   2600out_buffer_pool:
   2601	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
   2602	return -ENOMEM;
   2603}
   2604
   2605static void qeth_free_qdio_queues(struct qeth_card *card)
   2606{
   2607	int i, j;
   2608
   2609	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
   2610		QETH_QDIO_UNINITIALIZED)
   2611		return;
   2612
   2613	qeth_free_cq(card);
   2614	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
   2615		if (card->qdio.in_q->bufs[j].rx_skb) {
   2616			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
   2617			card->qdio.in_q->bufs[j].rx_skb = NULL;
   2618		}
   2619	}
   2620
   2621	/* inbound buffer pool */
   2622	qeth_free_buffer_pool(card);
   2623	/* free outbound qdio_qs */
   2624	for (i = 0; i < card->qdio.no_out_queues; i++) {
   2625		qeth_free_output_queue(card->qdio.out_qs[i]);
   2626		card->qdio.out_qs[i] = NULL;
   2627	}
   2628}
   2629
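        /*
         * Build the QIB parameter area.  Each record is tagged with a four-
         * character EBCDIC magic: "PCIT" carries the PCI thresholds, "BLKT"
         * the blocking-timer values chosen in qeth_set_blkt_defaults(), and
         * "PQUE" the per-queue priorities (skipped when prio-queueing uses
         * the implicit defaults anyway).
         */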
   2630static void qeth_fill_qib_parms(struct qeth_card *card,
   2631				struct qeth_qib_parms *parms)
   2632{
   2633	struct qeth_qdio_out_q *queue;
   2634	unsigned int i;
   2635
   2636	parms->pcit_magic[0] = 'P';
   2637	parms->pcit_magic[1] = 'C';
   2638	parms->pcit_magic[2] = 'I';
   2639	parms->pcit_magic[3] = 'T';
   2640	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
   2641	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
   2642	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
   2643	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
   2644
   2645	parms->blkt_magic[0] = 'B';
   2646	parms->blkt_magic[1] = 'L';
   2647	parms->blkt_magic[2] = 'K';
   2648	parms->blkt_magic[3] = 'T';
   2649	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
   2650	parms->blkt_total = card->info.blkt.time_total;
   2651	parms->blkt_inter_packet = card->info.blkt.inter_packet;
   2652	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
   2653
   2654	/* Prio-queueing implicitly uses the default priorities: */
   2655	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
   2656		return;
   2657
   2658	parms->pque_magic[0] = 'P';
   2659	parms->pque_magic[1] = 'Q';
   2660	parms->pque_magic[2] = 'U';
   2661	parms->pque_magic[3] = 'E';
   2662	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
   2663	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
   2664	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
   2665
   2666	qeth_for_each_output_queue(card, queue, i)
   2667		parms->pque_priority[i] = queue->priority;
   2668}
   2669
   2670static int qeth_qdio_activate(struct qeth_card *card)
   2671{
   2672	QETH_CARD_TEXT(card, 3, "qdioact");
   2673	return qdio_activate(CARD_DDEV(card));
   2674}
   2675
   2676static int qeth_dm_act(struct qeth_card *card)
   2677{
   2678	struct qeth_cmd_buffer *iob;
   2679
   2680	QETH_CARD_TEXT(card, 2, "dmact");
   2681
   2682	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
   2683	if (!iob)
   2684		return -ENOMEM;
   2685
   2686	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
   2687	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
   2688	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
   2689	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
   2690	return qeth_send_control_data(card, iob, NULL, NULL);
   2691}
   2692
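        /*
         * Run the MPC handshake strictly in sequence: start the read
         * channel, then CM_ENABLE, CM_SETUP, ULP_ENABLE (which also
         * negotiates the MTU) and ULP_SETUP, and finally allocate,
         * establish and activate the qdio queues before signalling DM_ACT.
         * The first failing step aborts the whole bring-up.
         */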
   2693static int qeth_mpc_initialize(struct qeth_card *card)
   2694{
   2695	int rc;
   2696
   2697	QETH_CARD_TEXT(card, 2, "mpcinit");
   2698
   2699	rc = qeth_issue_next_read(card);
   2700	if (rc) {
   2701		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
   2702		return rc;
   2703	}
   2704	rc = qeth_cm_enable(card);
   2705	if (rc) {
   2706		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
   2707		return rc;
   2708	}
   2709	rc = qeth_cm_setup(card);
   2710	if (rc) {
   2711		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
   2712		return rc;
   2713	}
   2714	rc = qeth_ulp_enable(card);
   2715	if (rc) {
   2716		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
   2717		return rc;
   2718	}
   2719	rc = qeth_ulp_setup(card);
   2720	if (rc) {
   2721		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
   2722		return rc;
   2723	}
   2724	rc = qeth_alloc_qdio_queues(card);
   2725	if (rc) {
   2726		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
   2727		return rc;
   2728	}
   2729	rc = qeth_qdio_establish(card);
   2730	if (rc) {
   2731		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
   2732		qeth_free_qdio_queues(card);
   2733		return rc;
   2734	}
   2735	rc = qeth_qdio_activate(card);
   2736	if (rc) {
   2737		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
   2738		return rc;
   2739	}
   2740	rc = qeth_dm_act(card);
   2741	if (rc) {
   2742		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
   2743		return rc;
   2744	}
   2745
   2746	return 0;
   2747}
   2748
   2749static void qeth_print_status_message(struct qeth_card *card)
   2750{
   2751	switch (card->info.type) {
   2752	case QETH_CARD_TYPE_OSD:
   2753	case QETH_CARD_TYPE_OSM:
   2754	case QETH_CARD_TYPE_OSX:
    2755		/* VM uses a non-zero first character to indicate a
    2756		 * HiperSockets-like reporting of the level;
    2757		 * OSA sets the first character to zero.
    2758		 */
   2759		if (!card->info.mcl_level[0]) {
   2760			sprintf(card->info.mcl_level, "%02x%02x",
   2761				card->info.mcl_level[2],
   2762				card->info.mcl_level[3]);
   2763			break;
   2764		}
   2765		fallthrough;
   2766	case QETH_CARD_TYPE_IQD:
   2767		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
   2768			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
   2769				card->info.mcl_level[0]];
   2770			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
   2771				card->info.mcl_level[1]];
   2772			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
   2773				card->info.mcl_level[2]];
   2774			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
   2775				card->info.mcl_level[3]];
   2776			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
   2777		}
   2778		break;
   2779	default:
   2780		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
   2781	}
   2782	dev_info(&card->gdev->dev,
   2783		 "Device is a%s card%s%s%s\nwith link type %s.\n",
   2784		 qeth_get_cardname(card),
   2785		 (card->info.mcl_level[0]) ? " (level: " : "",
   2786		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
   2787		 (card->info.mcl_level[0]) ? ")" : "",
   2788		 qeth_get_cardname_short(card));
   2789}
   2790
   2791static void qeth_initialize_working_pool_list(struct qeth_card *card)
   2792{
   2793	struct qeth_buffer_pool_entry *entry;
   2794
   2795	QETH_CARD_TEXT(card, 5, "inwrklst");
   2796
   2797	list_for_each_entry(entry,
   2798			    &card->qdio.init_pool.entry_list, init_list) {
   2799		qeth_put_buffer_pool_entry(card, entry);
   2800	}
   2801}
   2802
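        /*
         * A pool entry is only reusable once none of its pages is still
         * referenced elsewhere (page_count() > 1 typically means an skb
         * that was handed up the stack still holds the page).  If no such
         * entry exists, the busy pages of the first entry are swapped for
         * freshly allocated ones instead.
         */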
   2803static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
   2804					struct qeth_card *card)
   2805{
   2806	struct qeth_buffer_pool_entry *entry;
   2807	int i, free;
   2808
   2809	if (list_empty(&card->qdio.in_buf_pool.entry_list))
   2810		return NULL;
   2811
   2812	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
   2813		free = 1;
   2814		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
   2815			if (page_count(entry->elements[i]) > 1) {
   2816				free = 0;
   2817				break;
   2818			}
   2819		}
   2820		if (free) {
   2821			list_del_init(&entry->list);
   2822			return entry;
   2823		}
   2824	}
   2825
   2826	/* no free buffer in pool so take first one and swap pages */
   2827	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
   2828				 struct qeth_buffer_pool_entry, list);
   2829	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
   2830		if (page_count(entry->elements[i]) > 1) {
   2831			struct page *page = dev_alloc_page();
   2832
   2833			if (!page)
   2834				return NULL;
   2835
   2836			__free_page(entry->elements[i]);
   2837			entry->elements[i] = page;
   2838			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
   2839		}
   2840	}
   2841	list_del_init(&entry->list);
   2842	return entry;
   2843}
   2844
   2845static int qeth_init_input_buffer(struct qeth_card *card,
   2846		struct qeth_qdio_buffer *buf)
   2847{
   2848	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
   2849	int i;
   2850
   2851	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
   2852		buf->rx_skb = netdev_alloc_skb(card->dev,
   2853					       ETH_HLEN +
   2854					       sizeof(struct ipv6hdr));
   2855		if (!buf->rx_skb)
   2856			return -ENOMEM;
   2857	}
   2858
   2859	if (!pool_entry) {
   2860		pool_entry = qeth_find_free_buffer_pool_entry(card);
   2861		if (!pool_entry)
   2862			return -ENOBUFS;
   2863
   2864		buf->pool_entry = pool_entry;
   2865	}
   2866
   2867	/*
   2868	 * since the buffer is accessed only from the input_tasklet
   2869	 * there shouldn't be a need to synchronize; also, since we use
    2870	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
   2871	 * buffers
   2872	 */
   2873	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
   2874		buf->buffer->element[i].length = PAGE_SIZE;
   2875		buf->buffer->element[i].addr =
   2876			page_to_phys(pool_entry->elements[i]);
   2877		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
   2878			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
   2879		else
   2880			buf->buffer->element[i].eflags = 0;
   2881		buf->buffer->element[i].sflags = 0;
   2882	}
   2883	return 0;
   2884}
   2885
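        /*
         * TX bulking is only attempted on IQD unicast queues without a
         * completion queue; everywhere else each buffer is flushed on its
         * own.  The bulk limit comes from the SSQD's mmwc field (presumably
         * the device's multi-write count), with a fallback of 1 when the
         * device reports none.
         */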
   2886static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
   2887					    struct qeth_qdio_out_q *queue)
   2888{
   2889	if (!IS_IQD(card) ||
   2890	    qeth_iqd_is_mcast_queue(card, queue) ||
   2891	    card->options.cq == QETH_CQ_ENABLED ||
   2892	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
   2893		return 1;
   2894
   2895	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
   2896}
   2897
   2898static int qeth_init_qdio_queues(struct qeth_card *card)
   2899{
   2900	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
   2901	unsigned int i;
   2902	int rc;
   2903
   2904	QETH_CARD_TEXT(card, 2, "initqdqs");
   2905
   2906	/* inbound queue */
   2907	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
   2908	memset(&card->rx, 0, sizeof(struct qeth_rx));
   2909
   2910	qeth_initialize_working_pool_list(card);
    2911	/* give only as many buffers to hardware as we have buffer pool entries */
   2912	for (i = 0; i < rx_bufs; i++) {
   2913		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
   2914		if (rc)
   2915			return rc;
   2916	}
   2917
   2918	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
   2919	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
   2920	if (rc) {
   2921		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
   2922		return rc;
   2923	}
   2924
   2925	/* completion */
   2926	rc = qeth_cq_init(card);
    2927	if (rc)
    2928		return rc;
   2930
   2931	/* outbound queue */
   2932	for (i = 0; i < card->qdio.no_out_queues; ++i) {
   2933		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
   2934
   2935		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
   2936		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
   2937		queue->next_buf_to_fill = 0;
   2938		queue->do_pack = 0;
   2939		queue->prev_hdr = NULL;
   2940		queue->coalesced_frames = 0;
   2941		queue->bulk_start = 0;
   2942		queue->bulk_count = 0;
   2943		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
   2944		atomic_set(&queue->used_buffers, 0);
   2945		atomic_set(&queue->set_pci_flags_count, 0);
   2946		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
   2947	}
   2948	return 0;
   2949}
   2950
   2951static void qeth_ipa_finalize_cmd(struct qeth_card *card,
   2952				  struct qeth_cmd_buffer *iob)
   2953{
   2954	qeth_mpc_finalize_cmd(card, iob);
   2955
   2956	/* override with IPA-specific values: */
   2957	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
   2958}
   2959
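        /*
         * Wrap an IPA command into the MPC transport: copy in the PDU
         * header template, then patch the total transfer length, the
         * protocol type, the ULP connection token and the command length
         * (which is stored in three separate PDU length fields).
         */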
   2960static void qeth_prepare_ipa_cmd(struct qeth_card *card,
   2961				 struct qeth_cmd_buffer *iob, u16 cmd_length)
   2962{
   2963	u8 prot_type = qeth_mpc_select_prot_type(card);
   2964	u16 total_length = iob->length;
   2965
   2966	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
   2967		       iob->data);
   2968	iob->finalize = qeth_ipa_finalize_cmd;
   2969
   2970	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
   2971	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
   2972	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
   2973	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
   2974	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
   2975	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
   2976	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
   2977	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
   2978}
   2979
   2980static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
   2981				 struct qeth_cmd_buffer *reply)
   2982{
   2983	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
   2984
   2985	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
   2986}
   2987
   2988struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
   2989					   enum qeth_ipa_cmds cmd_code,
   2990					   enum qeth_prot_versions prot,
   2991					   unsigned int data_length)
   2992{
   2993	struct qeth_cmd_buffer *iob;
   2994	struct qeth_ipacmd_hdr *hdr;
   2995
   2996	data_length += offsetof(struct qeth_ipa_cmd, data);
   2997	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
   2998			     QETH_IPA_TIMEOUT);
   2999	if (!iob)
   3000		return NULL;
   3001
   3002	qeth_prepare_ipa_cmd(card, iob, data_length);
   3003	iob->match = qeth_ipa_match_reply;
   3004
   3005	hdr = &__ipa_cmd(iob)->hdr;
   3006	hdr->command = cmd_code;
   3007	hdr->initiator = IPA_CMD_INITIATOR_HOST;
   3008	/* hdr->seqno is set by qeth_send_control_data() */
   3009	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
   3010	hdr->rel_adapter_no = (u8) card->dev->dev_port;
   3011	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
   3012	hdr->param_count = 1;
   3013	hdr->prot_version = prot;
   3014	return iob;
   3015}
   3016EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
   3017
   3018static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
   3019				struct qeth_reply *reply, unsigned long data)
   3020{
   3021	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3022
   3023	return (cmd->hdr.return_code) ? -EIO : 0;
   3024}
   3025
   3026/*
   3027 * qeth_send_ipa_cmd() - send an IPA command
   3028 *
   3029 * See qeth_send_control_data() for explanation of the arguments.
   3030 */
   3031
   3032int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
   3033		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
   3034			unsigned long),
   3035		void *reply_param)
   3036{
   3037	int rc;
   3038
   3039	QETH_CARD_TEXT(card, 4, "sendipa");
   3040
   3041	if (card->read_or_write_problem) {
   3042		qeth_put_cmd(iob);
   3043		return -EIO;
   3044	}
   3045
   3046	if (reply_cb == NULL)
   3047		reply_cb = qeth_send_ipa_cmd_cb;
   3048	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
   3049	if (rc == -ETIME) {
   3050		qeth_clear_ipacmd_list(card);
   3051		qeth_schedule_recovery(card);
   3052	}
   3053	return rc;
   3054}
   3055EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
   3056
   3057static int qeth_send_startlan_cb(struct qeth_card *card,
   3058				 struct qeth_reply *reply, unsigned long data)
   3059{
   3060	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3061
   3062	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
   3063		return -ENETDOWN;
   3064
   3065	return (cmd->hdr.return_code) ? -EIO : 0;
   3066}
   3067
   3068static int qeth_send_startlan(struct qeth_card *card)
   3069{
   3070	struct qeth_cmd_buffer *iob;
   3071
   3072	QETH_CARD_TEXT(card, 2, "strtlan");
   3073
   3074	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
   3075	if (!iob)
   3076		return -ENOMEM;
   3077	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
   3078}
   3079
   3080static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
   3081{
   3082	if (!cmd->hdr.return_code)
   3083		cmd->hdr.return_code =
   3084			cmd->data.setadapterparms.hdr.return_code;
   3085	return cmd->hdr.return_code;
   3086}
   3087
   3088static int qeth_query_setadapterparms_cb(struct qeth_card *card,
   3089		struct qeth_reply *reply, unsigned long data)
   3090{
   3091	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3092	struct qeth_query_cmds_supp *query_cmd;
   3093
   3094	QETH_CARD_TEXT(card, 3, "quyadpcb");
   3095	if (qeth_setadpparms_inspect_rc(cmd))
   3096		return -EIO;
   3097
   3098	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
   3099	if (query_cmd->lan_type & 0x7f) {
   3100		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
   3101			return -EPROTONOSUPPORT;
   3102
   3103		card->info.link_type = query_cmd->lan_type;
   3104		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
   3105	}
   3106
   3107	card->options.adp.supported = query_cmd->supported_cmds;
   3108	return 0;
   3109}
   3110
   3111static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
   3112						    enum qeth_ipa_setadp_cmd adp_cmd,
   3113						    unsigned int data_length)
   3114{
   3115	struct qeth_ipacmd_setadpparms_hdr *hdr;
   3116	struct qeth_cmd_buffer *iob;
   3117
   3118	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
   3119				 data_length +
   3120				 offsetof(struct qeth_ipacmd_setadpparms,
   3121					  data));
   3122	if (!iob)
   3123		return NULL;
   3124
   3125	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
   3126	hdr->cmdlength = sizeof(*hdr) + data_length;
   3127	hdr->command_code = adp_cmd;
   3128	hdr->used_total = 1;
   3129	hdr->seq_no = 1;
   3130	return iob;
   3131}
   3132
   3133static int qeth_query_setadapterparms(struct qeth_card *card)
   3134{
   3135	int rc;
   3136	struct qeth_cmd_buffer *iob;
   3137
   3138	QETH_CARD_TEXT(card, 3, "queryadp");
   3139	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
   3140				   SETADP_DATA_SIZEOF(query_cmds_supp));
   3141	if (!iob)
   3142		return -ENOMEM;
   3143	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
   3144	return rc;
   3145}
   3146
   3147static int qeth_query_ipassists_cb(struct qeth_card *card,
   3148		struct qeth_reply *reply, unsigned long data)
   3149{
   3150	struct qeth_ipa_cmd *cmd;
   3151
   3152	QETH_CARD_TEXT(card, 2, "qipasscb");
   3153
   3154	cmd = (struct qeth_ipa_cmd *) data;
   3155
   3156	switch (cmd->hdr.return_code) {
   3157	case IPA_RC_SUCCESS:
   3158		break;
   3159	case IPA_RC_NOTSUPP:
   3160	case IPA_RC_L2_UNSUPPORTED_CMD:
   3161		QETH_CARD_TEXT(card, 2, "ipaunsup");
   3162		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
   3163		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
   3164		return -EOPNOTSUPP;
   3165	default:
   3166		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
   3167				 CARD_DEVID(card), cmd->hdr.return_code);
   3168		return -EIO;
   3169	}
   3170
   3171	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
   3172		card->options.ipa4 = cmd->hdr.assists;
   3173	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
   3174		card->options.ipa6 = cmd->hdr.assists;
   3175	else
   3176		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
   3177				 CARD_DEVID(card));
   3178	return 0;
   3179}
   3180
   3181static int qeth_query_ipassists(struct qeth_card *card,
   3182				enum qeth_prot_versions prot)
   3183{
   3184	int rc;
   3185	struct qeth_cmd_buffer *iob;
   3186
   3187	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
   3188	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
   3189	if (!iob)
   3190		return -ENOMEM;
   3191	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
   3192	return rc;
   3193}
   3194
   3195static int qeth_query_switch_attributes_cb(struct qeth_card *card,
   3196				struct qeth_reply *reply, unsigned long data)
   3197{
   3198	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3199	struct qeth_query_switch_attributes *attrs;
   3200	struct qeth_switch_info *sw_info;
   3201
   3202	QETH_CARD_TEXT(card, 2, "qswiatcb");
   3203	if (qeth_setadpparms_inspect_rc(cmd))
   3204		return -EIO;
   3205
   3206	sw_info = (struct qeth_switch_info *)reply->param;
   3207	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
   3208	sw_info->capabilities = attrs->capabilities;
   3209	sw_info->settings = attrs->settings;
   3210	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
   3211			sw_info->settings);
   3212	return 0;
   3213}
   3214
   3215int qeth_query_switch_attributes(struct qeth_card *card,
   3216				 struct qeth_switch_info *sw_info)
   3217{
   3218	struct qeth_cmd_buffer *iob;
   3219
   3220	QETH_CARD_TEXT(card, 2, "qswiattr");
   3221	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
   3222		return -EOPNOTSUPP;
   3223	if (!netif_carrier_ok(card->dev))
   3224		return -ENOMEDIUM;
   3225	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
   3226	if (!iob)
   3227		return -ENOMEM;
   3228	return qeth_send_ipa_cmd(card, iob,
   3229				qeth_query_switch_attributes_cb, sw_info);
   3230}
   3231
   3232struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
   3233					  enum qeth_diags_cmds sub_cmd,
   3234					  unsigned int data_length)
   3235{
   3236	struct qeth_ipacmd_diagass *cmd;
   3237	struct qeth_cmd_buffer *iob;
   3238
   3239	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
   3240				 DIAG_HDR_LEN + data_length);
   3241	if (!iob)
   3242		return NULL;
   3243
   3244	cmd = &__ipa_cmd(iob)->data.diagass;
   3245	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
   3246	cmd->subcmd = sub_cmd;
   3247	return iob;
   3248}
   3249EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
   3250
   3251static int qeth_query_setdiagass_cb(struct qeth_card *card,
   3252		struct qeth_reply *reply, unsigned long data)
   3253{
   3254	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3255	u16 rc = cmd->hdr.return_code;
   3256
   3257	if (rc) {
   3258		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
   3259		return -EIO;
   3260	}
   3261
   3262	card->info.diagass_support = cmd->data.diagass.ext;
   3263	return 0;
   3264}
   3265
   3266static int qeth_query_setdiagass(struct qeth_card *card)
   3267{
   3268	struct qeth_cmd_buffer *iob;
   3269
   3270	QETH_CARD_TEXT(card, 2, "qdiagass");
   3271	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
   3272	if (!iob)
   3273		return -ENOMEM;
   3274	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
   3275}
   3276
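        /*
         * Collect the identifiers for a hardware trap: CHPID, subchannel
         * set and device number always; STSI level permitting, also the
         * LPAR number (SYSIB 2.2.2) and, at level 3, the VM guest name
         * from SYSIB 3.2.2 (converted from EBCDIC).
         */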
   3277static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
   3278{
   3279	unsigned long info = get_zeroed_page(GFP_KERNEL);
   3280	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
   3281	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
   3282	struct ccw_dev_id ccwid;
   3283	int level;
   3284
   3285	tid->chpid = card->info.chpid;
   3286	ccw_device_get_id(CARD_RDEV(card), &ccwid);
   3287	tid->ssid = ccwid.ssid;
   3288	tid->devno = ccwid.devno;
   3289	if (!info)
   3290		return;
   3291	level = stsi(NULL, 0, 0, 0);
   3292	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
   3293		tid->lparnr = info222->lpar_number;
   3294	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
   3295		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
   3296		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
   3297	}
   3298	free_page(info);
   3299}
   3300
   3301static int qeth_hw_trap_cb(struct qeth_card *card,
   3302		struct qeth_reply *reply, unsigned long data)
   3303{
   3304	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   3305	u16 rc = cmd->hdr.return_code;
   3306
   3307	if (rc) {
   3308		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
   3309		return -EIO;
   3310	}
   3311	return 0;
   3312}
   3313
   3314int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
   3315{
   3316	struct qeth_cmd_buffer *iob;
   3317	struct qeth_ipa_cmd *cmd;
   3318
   3319	QETH_CARD_TEXT(card, 2, "diagtrap");
   3320	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
   3321	if (!iob)
   3322		return -ENOMEM;
   3323	cmd = __ipa_cmd(iob);
   3324	cmd->data.diagass.type = 1;
   3325	cmd->data.diagass.action = action;
   3326	switch (action) {
   3327	case QETH_DIAGS_TRAP_ARM:
   3328		cmd->data.diagass.options = 0x0003;
   3329		cmd->data.diagass.ext = 0x00010000 +
   3330			sizeof(struct qeth_trap_id);
   3331		qeth_get_trap_id(card,
   3332			(struct qeth_trap_id *)cmd->data.diagass.cdata);
   3333		break;
   3334	case QETH_DIAGS_TRAP_DISARM:
   3335		cmd->data.diagass.options = 0x0001;
   3336		break;
   3337	case QETH_DIAGS_TRAP_CAPTURE:
   3338		break;
   3339	}
   3340	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
   3341}
   3342
   3343static int qeth_check_qdio_errors(struct qeth_card *card,
   3344				  struct qdio_buffer *buf,
   3345				  unsigned int qdio_error,
   3346				  const char *dbftext)
   3347{
   3348	if (qdio_error) {
   3349		QETH_CARD_TEXT(card, 2, dbftext);
   3350		QETH_CARD_TEXT_(card, 2, " F15=%02X",
   3351			       buf->element[15].sflags);
   3352		QETH_CARD_TEXT_(card, 2, " F14=%02X",
   3353			       buf->element[14].sflags);
   3354		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
    3355		if (buf->element[15].sflags == 0x12) {
    3356			QETH_CARD_STAT_INC(card, rx_fifo_errors);
    3357			return 0;
    3358		}
    3359		return 1;
   3360	}
   3361	return 0;
   3362}
   3363
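        /*
         * Refill the RX queue from the buffer pool, but only once at least
         * QETH_IN_BUF_REQUEUE_THRESHOLD buffers have been consumed, so that
         * several buffers are returned to the device with a single SIGA.
         * On a shortage, fall back to plain skb allocation; when nothing
         * could be refilled although the complete pool is queued, schedule
         * the reclaim worker.
         */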
   3364static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
   3365					 unsigned int count)
   3366{
   3367	struct qeth_qdio_q *queue = card->qdio.in_q;
   3368	struct list_head *lh;
   3369	int i;
   3370	int rc;
   3371	int newcount = 0;
   3372
   3373	/* only requeue at a certain threshold to avoid SIGAs */
   3374	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
   3375		for (i = queue->next_buf_to_init;
   3376		     i < queue->next_buf_to_init + count; ++i) {
   3377			if (qeth_init_input_buffer(card,
   3378				&queue->bufs[QDIO_BUFNR(i)])) {
   3379				break;
   3380			} else {
   3381				newcount++;
   3382			}
   3383		}
   3384
   3385		if (newcount < count) {
    3386			/* we are short on memory, so switch back to
    3387			 * traditional skb allocation and drop packets */
   3388			atomic_set(&card->force_alloc_skb, 3);
   3389			count = newcount;
   3390		} else {
   3391			atomic_add_unless(&card->force_alloc_skb, -1, 0);
   3392		}
   3393
   3394		if (!count) {
   3395			i = 0;
   3396			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
   3397				i++;
   3398			if (i == card->qdio.in_buf_pool.buf_count) {
   3399				QETH_CARD_TEXT(card, 2, "qsarbw");
   3400				schedule_delayed_work(
   3401					&card->buffer_reclaim_work,
   3402					QETH_RECLAIM_WORK_TIME);
   3403			}
   3404			return 0;
   3405		}
   3406
   3407		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
   3408						  queue->next_buf_to_init,
   3409						  count);
    3410		if (rc)
    3411			QETH_CARD_TEXT(card, 2, "qinberr");
   3413		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
   3414						     count);
   3415		return count;
   3416	}
   3417
   3418	return 0;
   3419}
   3420
   3421static void qeth_buffer_reclaim_work(struct work_struct *work)
   3422{
   3423	struct qeth_card *card = container_of(to_delayed_work(work),
   3424					      struct qeth_card,
   3425					      buffer_reclaim_work);
   3426
   3427	local_bh_disable();
   3428	napi_schedule(&card->napi);
   3429	/* kick-start the NAPI softirq: */
   3430	local_bh_enable();
   3431}
   3432
   3433static void qeth_handle_send_error(struct qeth_card *card,
   3434		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
   3435{
   3436	int sbalf15 = buffer->buffer->element[15].sflags;
   3437
   3438	QETH_CARD_TEXT(card, 6, "hdsnderr");
   3439	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
   3440
   3441	if (!qdio_err)
   3442		return;
   3443
   3444	if ((sbalf15 >= 15) && (sbalf15 <= 31))
   3445		return;
   3446
   3447	QETH_CARD_TEXT(card, 1, "lnkfail");
   3448	QETH_CARD_TEXT_(card, 1, "%04x %02x",
   3449		       (u16)qdio_err, (u8)sbalf15);
   3450}
   3451
   3452/**
    3453 * qeth_prep_flush_pack_buffer() - Prepares flushing of a packing buffer.
   3454 * @queue: queue to check for packing buffer
   3455 *
   3456 * Returns number of buffers that were prepared for flush.
   3457 */
   3458static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
   3459{
   3460	struct qeth_qdio_out_buffer *buffer;
   3461
   3462	buffer = queue->bufs[queue->next_buf_to_fill];
   3463	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
   3464	    (buffer->next_element_to_fill > 0)) {
   3465		/* it's a packing buffer */
   3466		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
   3467		queue->next_buf_to_fill =
   3468			QDIO_BUFNR(queue->next_buf_to_fill + 1);
   3469		return 1;
   3470	}
   3471	return 0;
   3472}
   3473
   3474/*
    3475 * Switches to packing state if the number of used buffers on a queue
   3476 * reaches a certain limit.
   3477 */
   3478static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
   3479{
   3480	if (!queue->do_pack) {
   3481		if (atomic_read(&queue->used_buffers)
   3482		    >= QETH_HIGH_WATERMARK_PACK){
   3483			/* switch non-PACKING -> PACKING */
   3484			QETH_CARD_TEXT(queue->card, 6, "np->pack");
   3485			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
   3486			queue->do_pack = 1;
   3487		}
   3488	}
   3489}
   3490
   3491/*
   3492 * Switches from packing to non-packing mode. If there is a packing
   3493 * buffer on the queue this buffer will be prepared to be flushed.
   3494 * In that case 1 is returned to inform the caller. If no buffer
   3495 * has to be flushed, zero is returned.
   3496 */
   3497static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
   3498{
   3499	if (queue->do_pack) {
   3500		if (atomic_read(&queue->used_buffers)
   3501		    <= QETH_LOW_WATERMARK_PACK) {
   3502			/* switch PACKING -> non-PACKING */
   3503			QETH_CARD_TEXT(queue->card, 6, "pack->np");
   3504			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
   3505			queue->do_pack = 0;
   3506			return qeth_prep_flush_pack_buffer(queue);
   3507		}
   3508	}
   3509	return 0;
   3510}
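
/*
 * The two watermarks above form a hysteresis, so the queue does not flap
 * between modes. A sketch with illustrative values (the real constants
 * are defined in qeth_core.h):
 *
 *	QETH_HIGH_WATERMARK_PACK == 5, QETH_LOW_WATERMARK_PACK == 2
 *	-> used_buffers rises to 5:  switch non-PACKING -> PACKING
 *	-> used_buffers drops to 2:  switch PACKING -> non-PACKING, and a
 *	   partially filled packing buffer is primed for flushing
 */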
   3511
   3512static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
   3513			       int count)
   3514{
   3515	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
   3516	struct qeth_card *card = queue->card;
   3517	unsigned int frames, usecs;
   3518	struct qaob *aob = NULL;
   3519	int rc;
   3520	int i;
   3521
   3522	for (i = index; i < index + count; ++i) {
   3523		unsigned int bidx = QDIO_BUFNR(i);
   3524		struct sk_buff *skb;
   3525
   3526		buf = queue->bufs[bidx];
   3527		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
   3528				SBAL_EFLAGS_LAST_ENTRY;
   3529		queue->coalesced_frames += buf->frames;
   3530
   3531		if (IS_IQD(card)) {
   3532			skb_queue_walk(&buf->skb_list, skb)
   3533				skb_tx_timestamp(skb);
   3534		}
   3535	}
   3536
   3537	if (IS_IQD(card)) {
   3538		if (card->options.cq == QETH_CQ_ENABLED &&
   3539		    !qeth_iqd_is_mcast_queue(card, queue) &&
   3540		    count == 1) {
   3541			if (!buf->aob)
   3542				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
   3543							     GFP_ATOMIC);
   3544			if (buf->aob) {
   3545				struct qeth_qaob_priv1 *priv;
   3546
   3547				aob = buf->aob;
   3548				priv = (struct qeth_qaob_priv1 *)&aob->user1;
   3549				priv->state = QETH_QAOB_ISSUED;
   3550				priv->queue_no = queue->queue_no;
   3551			}
   3552		}
   3553	} else {
   3554		if (!queue->do_pack) {
   3555			if ((atomic_read(&queue->used_buffers) >=
   3556				(QETH_HIGH_WATERMARK_PACK -
   3557				 QETH_WATERMARK_PACK_FUZZ)) &&
   3558			    !atomic_read(&queue->set_pci_flags_count)) {
   3559				/* it's likely that we'll go to packing
   3560				 * mode soon */
   3561				atomic_inc(&queue->set_pci_flags_count);
   3562				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
   3563			}
   3564		} else {
   3565			if (!atomic_read(&queue->set_pci_flags_count)) {
   3566				/*
    3567			 * there's no outstanding PCI any more, so we
    3568			 * have to request a PCI to be sure that it will
    3569			 * fire at some time in the future. Then we can
    3570			 * flush packed buffers that might still be
    3571			 * hanging around, which can happen if no further
    3572			 * send was requested by the stack
   3573				 */
   3574				atomic_inc(&queue->set_pci_flags_count);
   3575				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
   3576			}
   3577		}
   3578	}
   3579
   3580	QETH_TXQ_STAT_INC(queue, doorbell);
   3581	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
   3582					   index, count, aob);
   3583
   3584	switch (rc) {
   3585	case 0:
   3586	case -ENOBUFS:
   3587		/* ignore temporary SIGA errors without busy condition */
   3588
   3589		/* Fake the TX completion interrupt: */
   3590		frames = READ_ONCE(queue->max_coalesced_frames);
   3591		usecs = READ_ONCE(queue->coalesce_usecs);
   3592
   3593		if (frames && queue->coalesced_frames >= frames) {
   3594			napi_schedule(&queue->napi);
   3595			queue->coalesced_frames = 0;
   3596			QETH_TXQ_STAT_INC(queue, coal_frames);
   3597		} else if (qeth_use_tx_irqs(card) &&
   3598			   atomic_read(&queue->used_buffers) >= 32) {
   3599			/* Old behaviour carried over from the qdio layer: */
   3600			napi_schedule(&queue->napi);
   3601			QETH_TXQ_STAT_INC(queue, coal_frames);
   3602		} else if (usecs) {
   3603			qeth_tx_arm_timer(queue, usecs);
   3604		}
   3605
   3606		break;
   3607	default:
   3608		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
   3609		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
   3610		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
   3611		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
   3612		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
   3613
    3614		/* this must not happen under normal circumstances. If it
    3615		 * does, something is really wrong -> recover */
   3616		qeth_schedule_recovery(queue->card);
   3617	}
   3618}
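
/*
 * How the faked completion interrupt above plays out, as a sketch with
 * illustrative per-queue settings (both are tunable via ethtool's
 * per-queue coalescing interface):
 *
 *	max_coalesced_frames == 64, coalesce_usecs == 25
 *	-> a doorbell that pushes coalesced_frames to >= 64 schedules
 *	   the TX NAPI instance immediately
 *	-> otherwise a 25 usec timer is armed, so completions are still
 *	   reaped even if the stack sends nothing further
 */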
   3619
   3620static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
   3621{
   3622	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
   3623
   3624	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
   3625	queue->prev_hdr = NULL;
   3626	queue->bulk_count = 0;
   3627}
   3628
   3629static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
   3630{
   3631	/*
    3632	 * check if we have to switch to non-packing mode or if
    3633	 * we have to get a PCI flag out on the queue
   3634	 */
   3635	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
   3636	    !atomic_read(&queue->set_pci_flags_count)) {
   3637		unsigned int index, flush_cnt;
   3638
   3639		spin_lock(&queue->lock);
   3640
   3641		index = queue->next_buf_to_fill;
   3642
   3643		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
   3644		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
   3645			flush_cnt = qeth_prep_flush_pack_buffer(queue);
   3646
   3647		if (flush_cnt) {
   3648			qeth_flush_buffers(queue, index, flush_cnt);
   3649			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
   3650		}
   3651
   3652		spin_unlock(&queue->lock);
   3653	}
   3654}
   3655
   3656static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
   3657{
   3658	struct qeth_card *card = (struct qeth_card *)card_ptr;
   3659
   3660	napi_schedule_irqoff(&card->napi);
   3661}
   3662
   3663int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
   3664{
   3665	int rc;
   3666
    3667	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
   3668		rc = -1;
   3669		goto out;
   3670	} else {
   3671		if (card->options.cq == cq) {
   3672			rc = 0;
   3673			goto out;
   3674		}
   3675
   3676		qeth_free_qdio_queues(card);
   3677		card->options.cq = cq;
   3678		rc = 0;
   3679	}
   3680out:
    3681	return rc;
    3683}
   3684EXPORT_SYMBOL_GPL(qeth_configure_cq);
   3685
   3686static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
   3687{
   3688	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
   3689	unsigned int queue_no = priv->queue_no;
   3690
   3691	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
   3692
   3693	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
   3694	    queue_no < card->qdio.no_out_queues)
   3695		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
   3696}
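
/*
 * A reading of the QAOB handshake, as it appears in this file:
 * qeth_flush_buffers() marks the async buffer QETH_QAOB_ISSUED when it
 * hands the aob to the hardware; if the regular TX completion path
 * (elsewhere in this file) finds the buffer still pending, it advances
 * the state to QETH_QAOB_PENDING and leaves the cleanup to the aob.
 * The xchg() above atomically publishes QETH_QAOB_DONE: if it observed
 * PENDING, the TX queue's NAPI instance is scheduled to reap the
 * buffer; if it observed ISSUED, the TX completion path has not run
 * yet and will see DONE on its own.
 */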
   3697
   3698static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
   3699				 unsigned int queue, int first_element,
   3700				 int count)
   3701{
   3702	struct qeth_qdio_q *cq = card->qdio.c_q;
   3703	int i;
   3704	int rc;
   3705
   3706	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
   3707	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
   3708	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
   3709
   3710	if (qdio_err) {
   3711		netif_tx_stop_all_queues(card->dev);
   3712		qeth_schedule_recovery(card);
   3713		return;
   3714	}
   3715
   3716	for (i = first_element; i < first_element + count; ++i) {
   3717		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
   3718		int e = 0;
   3719
   3720		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
   3721		       buffer->element[e].addr) {
   3722			unsigned long phys_aob_addr = buffer->element[e].addr;
   3723
   3724			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
   3725			++e;
   3726		}
   3727		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
   3728	}
   3729	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
   3730					  cq->next_buf_to_init, count);
   3731	if (rc) {
   3732		dev_warn(&card->gdev->dev,
   3733			"QDIO reported an error, rc=%i\n", rc);
   3734		QETH_CARD_TEXT(card, 2, "qcqherr");
   3735	}
   3736
   3737	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
   3738}
   3739
   3740static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
   3741				    unsigned int qdio_err, int queue,
   3742				    int first_elem, int count,
   3743				    unsigned long card_ptr)
   3744{
   3745	struct qeth_card *card = (struct qeth_card *)card_ptr;
   3746
   3747	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
   3748	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
   3749
   3750	if (qdio_err)
   3751		qeth_schedule_recovery(card);
   3752}
   3753
   3754static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
   3755				     unsigned int qdio_error, int __queue,
   3756				     int first_element, int count,
   3757				     unsigned long card_ptr)
   3758{
    3759	struct qeth_card *card = (struct qeth_card *)card_ptr;
   3760
   3761	QETH_CARD_TEXT(card, 2, "achkcond");
   3762	netif_tx_stop_all_queues(card->dev);
   3763	qeth_schedule_recovery(card);
   3764}
   3765
   3766/*
   3767 * Note: Function assumes that we have 4 outbound queues.
   3768 */
   3769static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
   3770{
   3771	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
   3772	u8 tos;
   3773
   3774	switch (card->qdio.do_prio_queueing) {
   3775	case QETH_PRIO_Q_ING_TOS:
   3776	case QETH_PRIO_Q_ING_PREC:
   3777		switch (vlan_get_protocol(skb)) {
   3778		case htons(ETH_P_IP):
   3779			tos = ipv4_get_dsfield(ip_hdr(skb));
   3780			break;
   3781		case htons(ETH_P_IPV6):
   3782			tos = ipv6_get_dsfield(ipv6_hdr(skb));
   3783			break;
   3784		default:
   3785			return card->qdio.default_out_queue;
   3786		}
   3787		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
   3788			return ~tos >> 6 & 3;
   3789		if (tos & IPTOS_MINCOST)
   3790			return 3;
   3791		if (tos & IPTOS_RELIABILITY)
   3792			return 2;
   3793		if (tos & IPTOS_THROUGHPUT)
   3794			return 1;
   3795		if (tos & IPTOS_LOWDELAY)
   3796			return 0;
   3797		break;
   3798	case QETH_PRIO_Q_ING_SKB:
   3799		if (skb->priority > 5)
   3800			return 0;
   3801		return ~skb->priority >> 1 & 3;
   3802	case QETH_PRIO_Q_ING_VLAN:
   3803		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
   3804			return ~ntohs(veth->h_vlan_TCI) >>
   3805			       (VLAN_PRIO_SHIFT + 1) & 3;
   3806		break;
   3807	case QETH_PRIO_Q_ING_FIXED:
   3808		return card->qdio.default_out_queue;
   3809	default:
   3810		break;
   3811	}
   3812	return card->qdio.default_out_queue;
   3813}
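
/*
 * Worked values for the mappings above (integer promotion makes ~tos
 * negative; the arithmetic shift plus the final "& 3" still extract
 * the inverted bits as intended):
 *
 *	QETH_PRIO_Q_ING_PREC, ~tos >> 6 & 3 (inverts the top two bits):
 *		tos 0x00 -> queue 3, tos 0x40 -> queue 2,
 *		tos 0x80 -> queue 1, tos 0xC0 -> queue 0
 *	QETH_PRIO_Q_ING_SKB, ~skb->priority >> 1 & 3:
 *		priority 0-1 -> queue 3, 2-3 -> queue 2,
 *		priority 4-5 -> queue 1, above 5 -> queue 0
 */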
   3814
   3815/**
   3816 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
   3817 * @skb:				SKB address
   3818 *
   3819 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
   3820 * fragmented part of the SKB. Returns zero for linear SKB.
   3821 */
   3822static int qeth_get_elements_for_frags(struct sk_buff *skb)
   3823{
   3824	int cnt, elements = 0;
   3825
   3826	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
   3827		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
   3828
   3829		elements += qeth_get_elements_for_range(
   3830			(addr_t)skb_frag_address(frag),
   3831			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
   3832	}
   3833	return elements;
   3834}
   3835
   3836/**
   3837 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
   3838 *				to transmit an skb.
   3839 * @skb:			the skb to operate on.
   3840 * @data_offset:		skip this part of the skb's linear data
   3841 *
   3842 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
   3843 * skb's data (both its linear part and paged fragments).
   3844 */
   3845static unsigned int qeth_count_elements(struct sk_buff *skb,
   3846					unsigned int data_offset)
   3847{
   3848	unsigned int elements = qeth_get_elements_for_frags(skb);
   3849	addr_t end = (addr_t)skb->data + skb_headlen(skb);
   3850	addr_t start = (addr_t)skb->data + data_offset;
   3851
   3852	if (start != end)
   3853		elements += qeth_get_elements_for_range(start, end);
   3854	return elements;
   3855}
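
/*
 * A worked example, assuming 4K pages and the "pages spanned" semantics
 * of qeth_get_elements_for_range():
 *
 *	linear data from 0x1F00 to 0x2200 (headlen 0x300, no offset)
 *	  -> touches pages 0x1000 and 0x2000 -> 2 buffer elements
 *	plus one page-aligned 4K frag -> 1 further element
 *	-> qeth_count_elements(skb, 0) == 3
 */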
   3856
   3857#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
   3858					 MAX_TCP_HEADER)
   3859
   3860/**
   3861 * qeth_add_hw_header() - add a HW header to an skb.
   3862 * @queue: TX queue that the skb will be placed on.
   3863 * @skb: skb that the HW header should be added to.
   3864 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
   3865 *	 it contains a valid pointer to a qeth_hdr.
   3866 * @hdr_len: length of the HW header.
   3867 * @proto_len: length of protocol headers that need to be in same page as the
   3868 *	       HW header.
   3869 * @elements: returns the required number of buffer elements for this skb.
   3870 *
   3871 * Returns the pushed length. If the header can't be pushed on
    3872 * (e.g. because it would cross a page boundary), it is allocated from
   3873 * the cache instead and 0 is returned.
   3874 * The number of needed buffer elements is returned in @elements.
    3875 * An error while creating the hdr is indicated by a return value < 0.
   3876 */
   3877static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
   3878			      struct sk_buff *skb, struct qeth_hdr **hdr,
   3879			      unsigned int hdr_len, unsigned int proto_len,
   3880			      unsigned int *elements)
   3881{
   3882	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
   3883	const unsigned int contiguous = proto_len ? proto_len : 1;
   3884	const unsigned int max_elements = queue->max_elements;
   3885	unsigned int __elements;
   3886	addr_t start, end;
   3887	bool push_ok;
   3888	int rc;
   3889
   3890check_layout:
   3891	start = (addr_t)skb->data - hdr_len;
   3892	end = (addr_t)skb->data;
   3893
   3894	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
   3895		/* Push HW header into same page as first protocol header. */
   3896		push_ok = true;
   3897		/* ... but TSO always needs a separate element for headers: */
   3898		if (skb_is_gso(skb))
   3899			__elements = 1 + qeth_count_elements(skb, proto_len);
   3900		else
   3901			__elements = qeth_count_elements(skb, 0);
   3902	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
   3903		/* Push HW header into preceding page, flush with skb->data. */
   3904		push_ok = true;
   3905		__elements = 1 + qeth_count_elements(skb, 0);
   3906	} else {
   3907		/* Use header cache, copy protocol headers up. */
   3908		push_ok = false;
   3909		__elements = 1 + qeth_count_elements(skb, proto_len);
   3910	}
   3911
   3912	/* Compress skb to fit into one IO buffer: */
   3913	if (__elements > max_elements) {
   3914		if (!skb_is_nonlinear(skb)) {
   3915			/* Drop it, no easy way of shrinking it further. */
   3916			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
   3917					 max_elements, __elements, skb->len);
   3918			return -E2BIG;
   3919		}
   3920
   3921		rc = skb_linearize(skb);
   3922		if (rc) {
   3923			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
   3924			return rc;
   3925		}
   3926
   3927		QETH_TXQ_STAT_INC(queue, skbs_linearized);
   3928		/* Linearization changed the layout, re-evaluate: */
   3929		goto check_layout;
   3930	}
   3931
   3932	*elements = __elements;
   3933	/* Add the header: */
   3934	if (push_ok) {
   3935		*hdr = skb_push(skb, hdr_len);
   3936		return hdr_len;
   3937	}
   3938
   3939	/* Fall back to cache element with known-good alignment: */
   3940	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
   3941		return -E2BIG;
   3942	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
   3943	if (!*hdr)
   3944		return -ENOMEM;
   3945	/* Copy protocol headers behind HW header: */
   3946	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
   3947	return 0;
   3948}
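
/*
 * An example of the push-vs-cache decision above, assuming 4K pages,
 * hdr_len == 32 and proto_len == 0:
 *
 *	skb->data at page offset 0x010: the pushed header would start at
 *	offset -0x010, straddling a page boundary, and skb->data is not
 *	page-aligned either -> the header is allocated from
 *	qeth_core_header_cache and 0 is returned
 *	skb->data at page offset 0x800: the header fits at offset 0x7E0
 *	in the same page -> push_ok, returns the pushed length 32
 */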
   3949
   3950static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
   3951			      struct sk_buff *curr_skb,
   3952			      struct qeth_hdr *curr_hdr)
   3953{
   3954	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
   3955	struct qeth_hdr *prev_hdr = queue->prev_hdr;
   3956
   3957	if (!prev_hdr)
   3958		return true;
   3959
   3960	/* All packets must have the same target: */
   3961	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
   3962		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
   3963
   3964		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
   3965					eth_hdr(curr_skb)->h_dest) &&
   3966		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
   3967	}
   3968
   3969	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
   3970	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
   3971}
   3972
   3973/**
   3974 * qeth_fill_buffer() - map skb into an output buffer
   3975 * @buf:	buffer to transport the skb
   3976 * @skb:	skb to map into the buffer
   3977 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
   3978 *		from qeth_core_header_cache.
   3979 * @offset:	when mapping the skb, start at skb->data + offset
   3980 * @hd_len:	if > 0, build a dedicated header element of this size
   3981 */
   3982static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
   3983				     struct sk_buff *skb, struct qeth_hdr *hdr,
   3984				     unsigned int offset, unsigned int hd_len)
   3985{
   3986	struct qdio_buffer *buffer = buf->buffer;
   3987	int element = buf->next_element_to_fill;
   3988	int length = skb_headlen(skb) - offset;
   3989	char *data = skb->data + offset;
   3990	unsigned int elem_length, cnt;
   3991	bool is_first_elem = true;
   3992
   3993	__skb_queue_tail(&buf->skb_list, skb);
   3994
   3995	/* build dedicated element for HW Header */
   3996	if (hd_len) {
   3997		is_first_elem = false;
   3998
   3999		buffer->element[element].addr = virt_to_phys(hdr);
   4000		buffer->element[element].length = hd_len;
   4001		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
   4002
   4003		/* HW header is allocated from cache: */
   4004		if ((void *)hdr != skb->data)
   4005			__set_bit(element, buf->from_kmem_cache);
   4006		/* HW header was pushed and is contiguous with linear part: */
   4007		else if (length > 0 && !PAGE_ALIGNED(data) &&
   4008			 (data == (char *)hdr + hd_len))
   4009			buffer->element[element].eflags |=
   4010				SBAL_EFLAGS_CONTIGUOUS;
   4011
   4012		element++;
   4013	}
   4014
   4015	/* map linear part into buffer element(s) */
   4016	while (length > 0) {
   4017		elem_length = min_t(unsigned int, length,
   4018				    PAGE_SIZE - offset_in_page(data));
   4019
   4020		buffer->element[element].addr = virt_to_phys(data);
   4021		buffer->element[element].length = elem_length;
   4022		length -= elem_length;
   4023		if (is_first_elem) {
   4024			is_first_elem = false;
   4025			if (length || skb_is_nonlinear(skb))
   4026				/* skb needs additional elements */
   4027				buffer->element[element].eflags =
   4028					SBAL_EFLAGS_FIRST_FRAG;
   4029			else
   4030				buffer->element[element].eflags = 0;
   4031		} else {
   4032			buffer->element[element].eflags =
   4033				SBAL_EFLAGS_MIDDLE_FRAG;
   4034		}
   4035
   4036		data += elem_length;
   4037		element++;
   4038	}
   4039
   4040	/* map page frags into buffer element(s) */
   4041	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
   4042		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
   4043
   4044		data = skb_frag_address(frag);
   4045		length = skb_frag_size(frag);
   4046		while (length > 0) {
   4047			elem_length = min_t(unsigned int, length,
   4048					    PAGE_SIZE - offset_in_page(data));
   4049
   4050			buffer->element[element].addr = virt_to_phys(data);
   4051			buffer->element[element].length = elem_length;
   4052			buffer->element[element].eflags =
   4053				SBAL_EFLAGS_MIDDLE_FRAG;
   4054
   4055			length -= elem_length;
   4056			data += elem_length;
   4057			element++;
   4058		}
   4059	}
   4060
   4061	if (buffer->element[element - 1].eflags)
   4062		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
   4063	buf->next_element_to_fill = element;
   4064	return element;
   4065}
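
/*
 * The resulting eflags chain for a sketch skb with a two-page linear
 * part and one page frag, as mapped by the loops above:
 *
 *	element 0: FIRST_FRAG  (first chunk of linear data)
 *	element 1: MIDDLE_FRAG (remainder of linear data)
 *	element 2: MIDDLE_FRAG (the frag), rewritten to LAST_FRAG by the
 *	           final fixup before returning
 */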
   4066
   4067static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
   4068		       struct sk_buff *skb, unsigned int elements,
   4069		       struct qeth_hdr *hdr, unsigned int offset,
   4070		       unsigned int hd_len)
   4071{
   4072	unsigned int bytes = qdisc_pkt_len(skb);
   4073	struct qeth_qdio_out_buffer *buffer;
   4074	unsigned int next_element;
   4075	struct netdev_queue *txq;
   4076	bool stopped = false;
   4077	bool flush;
   4078
   4079	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
   4080	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
   4081
   4082	/* Just a sanity check, the wake/stop logic should ensure that we always
   4083	 * get a free buffer.
   4084	 */
   4085	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
   4086		return -EBUSY;
   4087
   4088	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
   4089
   4090	if (flush ||
   4091	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
   4092		if (buffer->next_element_to_fill > 0) {
   4093			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
   4094			queue->bulk_count++;
   4095		}
   4096
   4097		if (queue->bulk_count >= queue->bulk_max)
   4098			flush = true;
   4099
   4100		if (flush)
   4101			qeth_flush_queue(queue);
   4102
   4103		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
   4104						queue->bulk_count)];
   4105
   4106		/* Sanity-check again: */
   4107		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
   4108			return -EBUSY;
   4109	}
   4110
   4111	if (buffer->next_element_to_fill == 0 &&
   4112	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
    4113	/* If a TX completion happens right _here_ and fails to wake
   4114		 * the txq, then our re-check below will catch the race.
   4115		 */
   4116		QETH_TXQ_STAT_INC(queue, stopped);
   4117		netif_tx_stop_queue(txq);
   4118		stopped = true;
   4119	}
   4120
   4121	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
   4122	buffer->bytes += bytes;
   4123	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
   4124	queue->prev_hdr = hdr;
   4125
   4126	flush = __netdev_tx_sent_queue(txq, bytes,
   4127				       !stopped && netdev_xmit_more());
   4128
   4129	if (flush || next_element >= queue->max_elements) {
   4130		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
   4131		queue->bulk_count++;
   4132
   4133		if (queue->bulk_count >= queue->bulk_max)
   4134			flush = true;
   4135
   4136		if (flush)
   4137			qeth_flush_queue(queue);
   4138	}
   4139
   4140	if (stopped && !qeth_out_queue_is_full(queue))
   4141		netif_tx_start_queue(txq);
   4142	return 0;
   4143}
   4144
   4145static int qeth_do_send_packet(struct qeth_card *card,
   4146			       struct qeth_qdio_out_q *queue,
   4147			       struct sk_buff *skb, struct qeth_hdr *hdr,
   4148			       unsigned int offset, unsigned int hd_len,
   4149			       unsigned int elements_needed)
   4150{
   4151	unsigned int start_index = queue->next_buf_to_fill;
   4152	struct qeth_qdio_out_buffer *buffer;
   4153	unsigned int next_element;
   4154	struct netdev_queue *txq;
   4155	bool stopped = false;
   4156	int flush_count = 0;
   4157	int do_pack = 0;
   4158	int rc = 0;
   4159
   4160	buffer = queue->bufs[queue->next_buf_to_fill];
   4161
   4162	/* Just a sanity check, the wake/stop logic should ensure that we always
   4163	 * get a free buffer.
   4164	 */
   4165	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
   4166		return -EBUSY;
   4167
   4168	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
   4169
   4170	/* check if we need to switch packing state of this queue */
   4171	qeth_switch_to_packing_if_needed(queue);
   4172	if (queue->do_pack) {
   4173		do_pack = 1;
   4174		/* does packet fit in current buffer? */
   4175		if (buffer->next_element_to_fill + elements_needed >
   4176		    queue->max_elements) {
   4177			/* ... no -> set state PRIMED */
   4178			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
   4179			flush_count++;
   4180			queue->next_buf_to_fill =
   4181				QDIO_BUFNR(queue->next_buf_to_fill + 1);
   4182			buffer = queue->bufs[queue->next_buf_to_fill];
   4183
   4184			/* We stepped forward, so sanity-check again: */
   4185			if (atomic_read(&buffer->state) !=
   4186			    QETH_QDIO_BUF_EMPTY) {
   4187				qeth_flush_buffers(queue, start_index,
   4188							   flush_count);
   4189				rc = -EBUSY;
   4190				goto out;
   4191			}
   4192		}
   4193	}
   4194
   4195	if (buffer->next_element_to_fill == 0 &&
   4196	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
    4197	/* If a TX completion happens right _here_ and fails to wake
   4198		 * the txq, then our re-check below will catch the race.
   4199		 */
   4200		QETH_TXQ_STAT_INC(queue, stopped);
   4201		netif_tx_stop_queue(txq);
   4202		stopped = true;
   4203	}
   4204
   4205	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
   4206	buffer->bytes += qdisc_pkt_len(skb);
   4207	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
   4208
   4209	if (queue->do_pack)
   4210		QETH_TXQ_STAT_INC(queue, skbs_pack);
   4211	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
   4212		flush_count++;
   4213		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
   4214		queue->next_buf_to_fill =
   4215				QDIO_BUFNR(queue->next_buf_to_fill + 1);
   4216	}
   4217
   4218	if (flush_count)
   4219		qeth_flush_buffers(queue, start_index, flush_count);
   4220
   4221out:
   4222	if (do_pack)
   4223		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
   4224
   4225	if (stopped && !qeth_out_queue_is_full(queue))
   4226		netif_tx_start_queue(txq);
   4227	return rc;
   4228}
   4229
   4230static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
   4231			      unsigned int payload_len, struct sk_buff *skb,
   4232			      unsigned int proto_len)
   4233{
   4234	struct qeth_hdr_ext_tso *ext = &hdr->ext;
   4235
   4236	ext->hdr_tot_len = sizeof(*ext);
   4237	ext->imb_hdr_no = 1;
   4238	ext->hdr_type = 1;
   4239	ext->hdr_version = 1;
   4240	ext->hdr_len = 28;
   4241	ext->payload_len = payload_len;
   4242	ext->mss = skb_shinfo(skb)->gso_size;
   4243	ext->dg_hdr_len = proto_len;
   4244}
   4245
   4246int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
   4247	      struct qeth_qdio_out_q *queue, __be16 proto,
   4248	      void (*fill_header)(struct qeth_qdio_out_q *queue,
   4249				  struct qeth_hdr *hdr, struct sk_buff *skb,
   4250				  __be16 proto, unsigned int data_len))
   4251{
   4252	unsigned int proto_len, hw_hdr_len;
   4253	unsigned int frame_len = skb->len;
   4254	bool is_tso = skb_is_gso(skb);
   4255	unsigned int data_offset = 0;
   4256	struct qeth_hdr *hdr = NULL;
   4257	unsigned int hd_len = 0;
   4258	unsigned int elements;
   4259	int push_len, rc;
   4260
   4261	if (is_tso) {
   4262		hw_hdr_len = sizeof(struct qeth_hdr_tso);
   4263		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
   4264	} else {
   4265		hw_hdr_len = sizeof(struct qeth_hdr);
   4266		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
   4267	}
   4268
   4269	rc = skb_cow_head(skb, hw_hdr_len);
   4270	if (rc)
   4271		return rc;
   4272
   4273	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
   4274				      &elements);
   4275	if (push_len < 0)
   4276		return push_len;
   4277	if (is_tso || !push_len) {
   4278		/* HW header needs its own buffer element. */
   4279		hd_len = hw_hdr_len + proto_len;
   4280		data_offset = push_len + proto_len;
   4281	}
   4282	memset(hdr, 0, hw_hdr_len);
   4283	fill_header(queue, hdr, skb, proto, frame_len);
   4284	if (is_tso)
   4285		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
   4286				  frame_len - proto_len, skb, proto_len);
   4287
   4288	if (IS_IQD(card)) {
   4289		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
   4290				 hd_len);
   4291	} else {
   4292		/* TODO: drop skb_orphan() once TX completion is fast enough */
   4293		skb_orphan(skb);
   4294		spin_lock(&queue->lock);
   4295		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
   4296					 hd_len, elements);
   4297		spin_unlock(&queue->lock);
   4298	}
   4299
   4300	if (rc && !push_len)
   4301		kmem_cache_free(qeth_core_header_cache, hdr);
   4302
   4303	return rc;
   4304}
   4305EXPORT_SYMBOL_GPL(qeth_xmit);
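
/*
 * A usage sketch (hypothetical, not compiled in): how a sub-driver's
 * ndo_start_xmit() might drive qeth_xmit(). example_fill_header stands
 * in for the discipline-specific callback that builds the qeth_hdr.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue =
		card->qdio.out_qs[skb_get_queue_mapping(skb)];

	if (!qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
		       example_fill_header))
		return NETDEV_TX_OK;

	/* on error, qeth_xmit() did not consume the skb: drop it */
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
#endif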
   4306
   4307static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
   4308		struct qeth_reply *reply, unsigned long data)
   4309{
   4310	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   4311	struct qeth_ipacmd_setadpparms *setparms;
   4312
   4313	QETH_CARD_TEXT(card, 4, "prmadpcb");
   4314
   4315	setparms = &(cmd->data.setadapterparms);
   4316	if (qeth_setadpparms_inspect_rc(cmd)) {
   4317		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
   4318		setparms->data.mode = SET_PROMISC_MODE_OFF;
   4319	}
   4320	card->info.promisc_mode = setparms->data.mode;
   4321	return (cmd->hdr.return_code) ? -EIO : 0;
   4322}
   4323
   4324void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
   4325{
   4326	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
   4327						    SET_PROMISC_MODE_OFF;
   4328	struct qeth_cmd_buffer *iob;
   4329	struct qeth_ipa_cmd *cmd;
   4330
   4331	QETH_CARD_TEXT(card, 4, "setprom");
   4332	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
   4333
   4334	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
   4335				   SETADP_DATA_SIZEOF(mode));
   4336	if (!iob)
   4337		return;
   4338	cmd = __ipa_cmd(iob);
   4339	cmd->data.setadapterparms.data.mode = mode;
   4340	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
   4341}
   4342EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
   4343
   4344static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
   4345		struct qeth_reply *reply, unsigned long data)
   4346{
   4347	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   4348	struct qeth_ipacmd_setadpparms *adp_cmd;
   4349
   4350	QETH_CARD_TEXT(card, 4, "chgmaccb");
   4351	if (qeth_setadpparms_inspect_rc(cmd))
   4352		return -EIO;
   4353
   4354	adp_cmd = &cmd->data.setadapterparms;
   4355	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
   4356		return -EADDRNOTAVAIL;
   4357
   4358	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
   4359	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
   4360		return -EADDRNOTAVAIL;
   4361
   4362	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
   4363	return 0;
   4364}
   4365
   4366int qeth_setadpparms_change_macaddr(struct qeth_card *card)
   4367{
   4368	int rc;
   4369	struct qeth_cmd_buffer *iob;
   4370	struct qeth_ipa_cmd *cmd;
   4371
   4372	QETH_CARD_TEXT(card, 4, "chgmac");
   4373
   4374	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
   4375				   SETADP_DATA_SIZEOF(change_addr));
   4376	if (!iob)
   4377		return -ENOMEM;
   4378	cmd = __ipa_cmd(iob);
   4379	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
   4380	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
   4381	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
   4382			card->dev->dev_addr);
   4383	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
   4384			       NULL);
   4385	return rc;
   4386}
   4387EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
   4388
   4389static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
   4390		struct qeth_reply *reply, unsigned long data)
   4391{
   4392	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   4393	struct qeth_set_access_ctrl *access_ctrl_req;
   4394
   4395	QETH_CARD_TEXT(card, 4, "setaccb");
   4396
   4397	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
   4398	QETH_CARD_TEXT_(card, 2, "rc=%d",
   4399			cmd->data.setadapterparms.hdr.return_code);
   4400	if (cmd->data.setadapterparms.hdr.return_code !=
   4401						SET_ACCESS_CTRL_RC_SUCCESS)
   4402		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
   4403				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
   4404				 cmd->data.setadapterparms.hdr.return_code);
   4405	switch (qeth_setadpparms_inspect_rc(cmd)) {
   4406	case SET_ACCESS_CTRL_RC_SUCCESS:
   4407		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
   4408			dev_info(&card->gdev->dev,
   4409			    "QDIO data connection isolation is deactivated\n");
   4410		else
   4411			dev_info(&card->gdev->dev,
   4412			    "QDIO data connection isolation is activated\n");
   4413		return 0;
   4414	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
   4415		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
   4416				 CARD_DEVID(card));
   4417		return 0;
   4418	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
   4419		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
   4420				 CARD_DEVID(card));
   4421		return 0;
   4422	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
   4423		dev_err(&card->gdev->dev, "Adapter does not "
   4424			"support QDIO data connection isolation\n");
   4425		return -EOPNOTSUPP;
   4426	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
   4427		dev_err(&card->gdev->dev,
   4428			"Adapter is dedicated. "
   4429			"QDIO data connection isolation not supported\n");
   4430		return -EOPNOTSUPP;
   4431	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
   4432		dev_err(&card->gdev->dev,
   4433			"TSO does not permit QDIO data connection isolation\n");
   4434		return -EPERM;
   4435	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
   4436		dev_err(&card->gdev->dev, "The adjacent switch port does not "
   4437			"support reflective relay mode\n");
   4438		return -EOPNOTSUPP;
   4439	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
    4440		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
    4441					"enabled at the adjacent switch port\n");
   4442		return -EREMOTEIO;
   4443	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
   4444		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
   4445					"at the adjacent switch failed\n");
   4446		/* benign error while disabling ISOLATION_MODE_FWD */
   4447		return 0;
   4448	default:
   4449		return -EIO;
   4450	}
   4451}
   4452
   4453int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
   4454				     enum qeth_ipa_isolation_modes mode)
   4455{
   4456	int rc;
   4457	struct qeth_cmd_buffer *iob;
   4458	struct qeth_ipa_cmd *cmd;
   4459	struct qeth_set_access_ctrl *access_ctrl_req;
   4460
   4461	QETH_CARD_TEXT(card, 4, "setacctl");
   4462
   4463	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
   4464		dev_err(&card->gdev->dev,
   4465			"Adapter does not support QDIO data connection isolation\n");
   4466		return -EOPNOTSUPP;
   4467	}
   4468
   4469	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
   4470				   SETADP_DATA_SIZEOF(set_access_ctrl));
   4471	if (!iob)
   4472		return -ENOMEM;
   4473	cmd = __ipa_cmd(iob);
   4474	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
   4475	access_ctrl_req->subcmd_code = mode;
   4476
   4477	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
   4478			       NULL);
   4479	if (rc) {
   4480		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
    4481		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: send failed\n",
   4482				 rc, CARD_DEVID(card));
   4483	}
   4484
   4485	return rc;
   4486}
   4487
   4488void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
   4489{
   4490	struct qeth_card *card;
   4491
   4492	card = dev->ml_priv;
   4493	QETH_CARD_TEXT(card, 4, "txtimeo");
   4494	qeth_schedule_recovery(card);
   4495}
   4496EXPORT_SYMBOL_GPL(qeth_tx_timeout);
   4497
   4498static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
   4499{
   4500	struct qeth_card *card = dev->ml_priv;
   4501	int rc = 0;
   4502
   4503	switch (regnum) {
   4504	case MII_BMCR: /* Basic mode control register */
   4505		rc = BMCR_FULLDPLX;
   4506		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
   4507		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
   4508		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
   4509			rc |= BMCR_SPEED100;
   4510		break;
   4511	case MII_BMSR: /* Basic mode status register */
   4512		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
   4513		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
   4514		     BMSR_100BASE4;
   4515		break;
   4516	case MII_PHYSID1: /* PHYS ID 1 */
   4517		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
   4518		     dev->dev_addr[2];
   4519		rc = (rc >> 5) & 0xFFFF;
   4520		break;
   4521	case MII_PHYSID2: /* PHYS ID 2 */
   4522		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
   4523		break;
   4524	case MII_ADVERTISE: /* Advertisement control reg */
   4525		rc = ADVERTISE_ALL;
   4526		break;
   4527	case MII_LPA: /* Link partner ability reg */
   4528		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
   4529		     LPA_100BASE4 | LPA_LPACK;
   4530		break;
   4531	case MII_EXPANSION: /* Expansion register */
   4532		break;
   4533	case MII_DCOUNTER: /* disconnect counter */
   4534		break;
   4535	case MII_FCSCOUNTER: /* false carrier counter */
   4536		break;
   4537	case MII_NWAYTEST: /* N-way auto-neg test register */
   4538		break;
   4539	case MII_RERRCOUNTER: /* rx error counter */
   4540		rc = card->stats.rx_length_errors +
   4541		     card->stats.rx_frame_errors +
   4542		     card->stats.rx_fifo_errors;
   4543		break;
   4544	case MII_SREVISION: /* silicon revision */
   4545		break;
   4546	case MII_RESV1: /* reserved 1 */
   4547		break;
   4548	case MII_LBRERROR: /* loopback, rx, bypass error */
   4549		break;
   4550	case MII_PHYADDR: /* physical address */
   4551		break;
   4552	case MII_RESV2: /* reserved 2 */
   4553		break;
   4554	case MII_TPISTATUS: /* TPI status for 10mbps */
   4555		break;
   4556	case MII_NCONFIG: /* network interface config */
   4557		break;
   4558	default:
   4559		break;
   4560	}
   4561	return rc;
   4562}
   4563
   4564static int qeth_snmp_command_cb(struct qeth_card *card,
   4565				struct qeth_reply *reply, unsigned long data)
   4566{
   4567	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   4568	struct qeth_arp_query_info *qinfo = reply->param;
   4569	struct qeth_ipacmd_setadpparms *adp_cmd;
   4570	unsigned int data_len;
   4571	void *snmp_data;
   4572
   4573	QETH_CARD_TEXT(card, 3, "snpcmdcb");
   4574
   4575	if (cmd->hdr.return_code) {
   4576		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
   4577		return -EIO;
   4578	}
   4579	if (cmd->data.setadapterparms.hdr.return_code) {
   4580		cmd->hdr.return_code =
   4581			cmd->data.setadapterparms.hdr.return_code;
   4582		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
   4583		return -EIO;
   4584	}
   4585
   4586	adp_cmd = &cmd->data.setadapterparms;
   4587	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
   4588	if (adp_cmd->hdr.seq_no == 1) {
   4589		snmp_data = &adp_cmd->data.snmp;
   4590	} else {
   4591		snmp_data = &adp_cmd->data.snmp.request;
   4592		data_len -= offsetof(struct qeth_snmp_cmd, request);
   4593	}
   4594
   4595	/* check if there is enough room in userspace */
   4596	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
   4597		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
   4598		return -ENOSPC;
   4599	}
   4600	QETH_CARD_TEXT_(card, 4, "snore%i",
   4601			cmd->data.setadapterparms.hdr.used_total);
   4602	QETH_CARD_TEXT_(card, 4, "sseqn%i",
   4603			cmd->data.setadapterparms.hdr.seq_no);
    4604	/* copy entries to user buffer */
   4605	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
   4606	qinfo->udata_offset += data_len;
   4607
   4608	if (cmd->data.setadapterparms.hdr.seq_no <
   4609	    cmd->data.setadapterparms.hdr.used_total)
   4610		return 1;
   4611	return 0;
   4612}
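
/*
 * A note on the return-value convention used here and by the other
 * multi-part reply callbacks in this file: seq_no < used_total means
 * more reply parts will follow, so returning 1 keeps the command and
 * its reply context armed; returning 0 (or a negative errno) finishes
 * the command.
 */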
   4613
   4614static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
   4615{
   4616	struct qeth_snmp_ureq __user *ureq;
   4617	struct qeth_cmd_buffer *iob;
   4618	unsigned int req_len;
   4619	struct qeth_arp_query_info qinfo = {0, };
   4620	int rc = 0;
   4621
   4622	QETH_CARD_TEXT(card, 3, "snmpcmd");
   4623
   4624	if (IS_VM_NIC(card))
   4625		return -EOPNOTSUPP;
   4626
   4627	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
   4628	    IS_LAYER3(card))
   4629		return -EOPNOTSUPP;
   4630
   4631	ureq = (struct qeth_snmp_ureq __user *) udata;
   4632	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
   4633	    get_user(req_len, &ureq->hdr.req_len))
   4634		return -EFAULT;
   4635
   4636	/* Sanitize user input, to avoid overflows in iob size calculation: */
   4637	if (req_len > QETH_BUFSIZE)
   4638		return -EINVAL;
   4639
   4640	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
   4641	if (!iob)
   4642		return -ENOMEM;
   4643
   4644	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
   4645			   &ureq->cmd, req_len)) {
   4646		qeth_put_cmd(iob);
   4647		return -EFAULT;
   4648	}
   4649
   4650	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
   4651	if (!qinfo.udata) {
   4652		qeth_put_cmd(iob);
   4653		return -ENOMEM;
   4654	}
   4655	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
   4656
   4657	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
    4658	if (rc) {
    4659		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
    4660				 CARD_DEVID(card), rc);
    4661	} else {
   4662		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
   4663			rc = -EFAULT;
   4664	}
   4665
   4666	kfree(qinfo.udata);
   4667	return rc;
   4668}
   4669
   4670static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
   4671					 struct qeth_reply *reply,
   4672					 unsigned long data)
   4673{
   4674	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
   4675	struct qeth_qoat_priv *priv = reply->param;
   4676	int resdatalen;
   4677
   4678	QETH_CARD_TEXT(card, 3, "qoatcb");
   4679	if (qeth_setadpparms_inspect_rc(cmd))
   4680		return -EIO;
   4681
   4682	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
   4683
   4684	if (resdatalen > (priv->buffer_len - priv->response_len))
   4685		return -ENOSPC;
   4686
   4687	memcpy(priv->buffer + priv->response_len,
   4688	       &cmd->data.setadapterparms.hdr, resdatalen);
   4689	priv->response_len += resdatalen;
   4690
   4691	if (cmd->data.setadapterparms.hdr.seq_no <
   4692	    cmd->data.setadapterparms.hdr.used_total)
   4693		return 1;
   4694	return 0;
   4695}
   4696
   4697static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
   4698{
   4699	int rc = 0;
   4700	struct qeth_cmd_buffer *iob;
   4701	struct qeth_ipa_cmd *cmd;
   4702	struct qeth_query_oat *oat_req;
   4703	struct qeth_query_oat_data oat_data;
   4704	struct qeth_qoat_priv priv;
   4705	void __user *tmp;
   4706
   4707	QETH_CARD_TEXT(card, 3, "qoatcmd");
   4708
   4709	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
   4710		return -EOPNOTSUPP;
   4711
   4712	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
   4713		return -EFAULT;
   4714
   4715	priv.buffer_len = oat_data.buffer_len;
   4716	priv.response_len = 0;
   4717	priv.buffer = vzalloc(oat_data.buffer_len);
   4718	if (!priv.buffer)
   4719		return -ENOMEM;
   4720
   4721	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
   4722				   SETADP_DATA_SIZEOF(query_oat));
   4723	if (!iob) {
   4724		rc = -ENOMEM;
   4725		goto out_free;
   4726	}
   4727	cmd = __ipa_cmd(iob);
   4728	oat_req = &cmd->data.setadapterparms.data.query_oat;
   4729	oat_req->subcmd_code = oat_data.command;
   4730
   4731	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
   4732	if (!rc) {
   4733		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
   4734					 u64_to_user_ptr(oat_data.ptr);
   4735		oat_data.response_len = priv.response_len;
   4736
   4737		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
   4738		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
   4739			rc = -EFAULT;
   4740	}
   4741
   4742out_free:
   4743	vfree(priv.buffer);
   4744	return rc;
   4745}
   4746
   4747static int qeth_query_card_info_cb(struct qeth_card *card,
   4748				   struct qeth_reply *reply, unsigned long data)
   4749{
   4750	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
   4751	struct qeth_link_info *link_info = reply->param;
   4752	struct qeth_query_card_info *card_info;
   4753
   4754	QETH_CARD_TEXT(card, 2, "qcrdincb");
   4755	if (qeth_setadpparms_inspect_rc(cmd))
   4756		return -EIO;
   4757
   4758	card_info = &cmd->data.setadapterparms.data.card_info;
   4759	netdev_dbg(card->dev,
   4760		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
   4761		   card_info->card_type, card_info->port_mode,
   4762		   card_info->port_speed);
   4763
   4764	switch (card_info->port_mode) {
   4765	case CARD_INFO_PORTM_FULLDUPLEX:
   4766		link_info->duplex = DUPLEX_FULL;
   4767		break;
   4768	case CARD_INFO_PORTM_HALFDUPLEX:
   4769		link_info->duplex = DUPLEX_HALF;
   4770		break;
   4771	default:
   4772		link_info->duplex = DUPLEX_UNKNOWN;
   4773	}
   4774
   4775	switch (card_info->card_type) {
   4776	case CARD_INFO_TYPE_1G_COPPER_A:
   4777	case CARD_INFO_TYPE_1G_COPPER_B:
   4778		link_info->speed = SPEED_1000;
   4779		link_info->port = PORT_TP;
   4780		break;
   4781	case CARD_INFO_TYPE_1G_FIBRE_A:
   4782	case CARD_INFO_TYPE_1G_FIBRE_B:
   4783		link_info->speed = SPEED_1000;
   4784		link_info->port = PORT_FIBRE;
   4785		break;
   4786	case CARD_INFO_TYPE_10G_FIBRE_A:
   4787	case CARD_INFO_TYPE_10G_FIBRE_B:
   4788		link_info->speed = SPEED_10000;
   4789		link_info->port = PORT_FIBRE;
   4790		break;
   4791	default:
   4792		switch (card_info->port_speed) {
   4793		case CARD_INFO_PORTS_10M:
   4794			link_info->speed = SPEED_10;
   4795			break;
   4796		case CARD_INFO_PORTS_100M:
   4797			link_info->speed = SPEED_100;
   4798			break;
   4799		case CARD_INFO_PORTS_1G:
   4800			link_info->speed = SPEED_1000;
   4801			break;
   4802		case CARD_INFO_PORTS_10G:
   4803			link_info->speed = SPEED_10000;
   4804			break;
   4805		case CARD_INFO_PORTS_25G:
   4806			link_info->speed = SPEED_25000;
   4807			break;
   4808		default:
   4809			link_info->speed = SPEED_UNKNOWN;
   4810		}
   4811
   4812		link_info->port = PORT_OTHER;
   4813	}
   4814
   4815	return 0;
   4816}
   4817
   4818int qeth_query_card_info(struct qeth_card *card,
   4819			 struct qeth_link_info *link_info)
   4820{
   4821	struct qeth_cmd_buffer *iob;
   4822
   4823	QETH_CARD_TEXT(card, 2, "qcrdinfo");
   4824	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
   4825		return -EOPNOTSUPP;
   4826	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
   4827	if (!iob)
   4828		return -ENOMEM;
   4829
   4830	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
   4831}
   4832
   4833static int qeth_init_link_info_oat_cb(struct qeth_card *card,
   4834				      struct qeth_reply *reply_priv,
   4835				      unsigned long data)
   4836{
   4837	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
   4838	struct qeth_link_info *link_info = reply_priv->param;
   4839	struct qeth_query_oat_physical_if *phys_if;
   4840	struct qeth_query_oat_reply *reply;
   4841
   4842	if (qeth_setadpparms_inspect_rc(cmd))
   4843		return -EIO;
   4844
   4845	/* Multi-part reply is unexpected, don't bother: */
   4846	if (cmd->data.setadapterparms.hdr.used_total > 1)
   4847		return -EINVAL;
   4848
   4849	/* Expect the reply to start with phys_if data: */
   4850	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
   4851	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
   4852	    reply->length < sizeof(*reply))
   4853		return -EINVAL;
   4854
   4855	phys_if = &reply->phys_if;
   4856
   4857	switch (phys_if->speed_duplex) {
   4858	case QETH_QOAT_PHYS_SPEED_10M_HALF:
   4859		link_info->speed = SPEED_10;
   4860		link_info->duplex = DUPLEX_HALF;
   4861		break;
   4862	case QETH_QOAT_PHYS_SPEED_10M_FULL:
   4863		link_info->speed = SPEED_10;
   4864		link_info->duplex = DUPLEX_FULL;
   4865		break;
   4866	case QETH_QOAT_PHYS_SPEED_100M_HALF:
   4867		link_info->speed = SPEED_100;
   4868		link_info->duplex = DUPLEX_HALF;
   4869		break;
   4870	case QETH_QOAT_PHYS_SPEED_100M_FULL:
   4871		link_info->speed = SPEED_100;
   4872		link_info->duplex = DUPLEX_FULL;
   4873		break;
   4874	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
   4875		link_info->speed = SPEED_1000;
   4876		link_info->duplex = DUPLEX_HALF;
   4877		break;
   4878	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
   4879		link_info->speed = SPEED_1000;
   4880		link_info->duplex = DUPLEX_FULL;
   4881		break;
   4882	case QETH_QOAT_PHYS_SPEED_10G_FULL:
   4883		link_info->speed = SPEED_10000;
   4884		link_info->duplex = DUPLEX_FULL;
   4885		break;
   4886	case QETH_QOAT_PHYS_SPEED_25G_FULL:
   4887		link_info->speed = SPEED_25000;
   4888		link_info->duplex = DUPLEX_FULL;
   4889		break;
   4890	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
   4891	default:
   4892		link_info->speed = SPEED_UNKNOWN;
   4893		link_info->duplex = DUPLEX_UNKNOWN;
   4894		break;
   4895	}
   4896
   4897	switch (phys_if->media_type) {
   4898	case QETH_QOAT_PHYS_MEDIA_COPPER:
   4899		link_info->port = PORT_TP;
   4900		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
   4901		break;
   4902	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
   4903		link_info->port = PORT_FIBRE;
   4904		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
   4905		break;
   4906	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
   4907		link_info->port = PORT_FIBRE;
   4908		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
   4909		break;
   4910	default:
   4911		link_info->port = PORT_OTHER;
   4912		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
   4913		break;
   4914	}
   4915
   4916	return 0;
   4917}
   4918
   4919static void qeth_init_link_info(struct qeth_card *card)
   4920{
   4921	card->info.link_info.duplex = DUPLEX_FULL;
   4922
   4923	if (IS_IQD(card) || IS_VM_NIC(card)) {
   4924		card->info.link_info.speed = SPEED_10000;
   4925		card->info.link_info.port = PORT_FIBRE;
   4926		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
   4927	} else {
   4928		switch (card->info.link_type) {
   4929		case QETH_LINK_TYPE_FAST_ETH:
   4930		case QETH_LINK_TYPE_LANE_ETH100:
   4931			card->info.link_info.speed = SPEED_100;
   4932			card->info.link_info.port = PORT_TP;
   4933			break;
   4934		case QETH_LINK_TYPE_GBIT_ETH:
   4935		case QETH_LINK_TYPE_LANE_ETH1000:
   4936			card->info.link_info.speed = SPEED_1000;
   4937			card->info.link_info.port = PORT_FIBRE;
   4938			break;
   4939		case QETH_LINK_TYPE_10GBIT_ETH:
   4940			card->info.link_info.speed = SPEED_10000;
   4941			card->info.link_info.port = PORT_FIBRE;
   4942			break;
   4943		case QETH_LINK_TYPE_25GBIT_ETH:
   4944			card->info.link_info.speed = SPEED_25000;
   4945			card->info.link_info.port = PORT_FIBRE;
   4946			break;
   4947		default:
   4948			dev_info(&card->gdev->dev, "Unknown link type %x\n",
   4949				 card->info.link_type);
   4950			card->info.link_info.speed = SPEED_UNKNOWN;
   4951			card->info.link_info.port = PORT_OTHER;
   4952		}
   4953
   4954		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
   4955	}
   4956
   4957	/* Get more accurate data via QUERY OAT: */
   4958	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
   4959		struct qeth_link_info link_info;
   4960		struct qeth_cmd_buffer *iob;
   4961
   4962		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
   4963					   SETADP_DATA_SIZEOF(query_oat));
   4964		if (iob) {
   4965			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
   4966			struct qeth_query_oat *oat_req;
   4967
   4968			oat_req = &cmd->data.setadapterparms.data.query_oat;
   4969			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
   4970
   4971			if (!qeth_send_ipa_cmd(card, iob,
   4972					       qeth_init_link_info_oat_cb,
   4973					       &link_info)) {
   4974				if (link_info.speed != SPEED_UNKNOWN)
   4975					card->info.link_info.speed = link_info.speed;
   4976				if (link_info.duplex != DUPLEX_UNKNOWN)
   4977					card->info.link_info.duplex = link_info.duplex;
   4978				if (link_info.port != PORT_OTHER)
   4979					card->info.link_info.port = link_info.port;
   4980				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
   4981					card->info.link_info.link_mode = link_info.link_mode;
   4982			}
   4983		}
   4984	}
   4985}
   4986
   4987/**
   4988 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
   4989 * @card: pointer to a qeth_card
   4990 *
   4991 * Returns
   4992 *	0, if a MAC address has been set for the card's netdevice
    4993 *	a negative error code, for various error conditions
   4994 */
   4995int qeth_vm_request_mac(struct qeth_card *card)
   4996{
   4997	struct diag26c_mac_resp *response;
   4998	struct diag26c_mac_req *request;
   4999	int rc;
   5000
   5001	QETH_CARD_TEXT(card, 2, "vmreqmac");
   5002
   5003	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
   5004	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
   5005	if (!request || !response) {
   5006		rc = -ENOMEM;
   5007		goto out;
   5008	}
   5009
   5010	request->resp_buf_len = sizeof(*response);
   5011	request->resp_version = DIAG26C_VERSION2;
   5012	request->op_code = DIAG26C_GET_MAC;
   5013	request->devno = card->info.ddev_devno;
   5014
   5015	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
   5016	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
   5017	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
   5018	if (rc)
   5019		goto out;
   5020	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
   5021
   5022	if (request->resp_buf_len < sizeof(*response) ||
   5023	    response->version != request->resp_version) {
   5024		rc = -EIO;
   5025		QETH_CARD_TEXT(card, 2, "badresp");
   5026		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
   5027			      sizeof(request->resp_buf_len));
   5028	} else if (!is_valid_ether_addr(response->mac)) {
   5029		rc = -EINVAL;
   5030		QETH_CARD_TEXT(card, 2, "badmac");
   5031		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
   5032	} else {
   5033		eth_hw_addr_set(card->dev, response->mac);
   5034	}
   5035
   5036out:
   5037	kfree(response);
   5038	kfree(request);
   5039	return rc;
   5040}
   5041EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
   5042
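        /* Probe the device's QDIO capabilities: temporarily start the data
         * channel if it is offline, read the configuration data, fetch the
         * subchannel's QDIO description (SSQD) and check whether Completion
         * Queueing is available.
         */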
   5043static void qeth_determine_capabilities(struct qeth_card *card)
   5044{
   5045	struct qeth_channel *channel = &card->data;
   5046	struct ccw_device *ddev = channel->ccwdev;
   5047	int rc;
   5048	int ddev_offline = 0;
   5049
   5050	QETH_CARD_TEXT(card, 2, "detcapab");
   5051	if (!ddev->online) {
   5052		ddev_offline = 1;
   5053		rc = qeth_start_channel(channel);
   5054		if (rc) {
   5055			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
   5056			goto out;
   5057		}
   5058	}
   5059
   5060	rc = qeth_read_conf_data(card);
   5061	if (rc) {
   5062		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
   5063				 CARD_DEVID(card), rc);
   5064		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
   5065		goto out_offline;
   5066	}
   5067
   5068	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
   5069	if (rc)
   5070		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
   5071
   5072	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
   5073	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
   5074	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
   5075	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
   5076	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
    5077	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
    5078	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) != 0 &&
    5079	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) != 0) {
   5080		dev_info(&card->gdev->dev,
   5081			"Completion Queueing supported\n");
   5082	} else {
   5083		card->options.cq = QETH_CQ_NOTAVAILABLE;
   5084	}
   5085
   5086out_offline:
   5087	if (ddev_offline == 1)
   5088		qeth_stop_channel(channel);
   5089out:
   5090	return;
   5091}
   5092
   5093static void qeth_read_ccw_conf_data(struct qeth_card *card)
   5094{
   5095	struct qeth_card_info *info = &card->info;
   5096	struct ccw_device *cdev = CARD_DDEV(card);
   5097	struct ccw_dev_id dev_id;
   5098
   5099	QETH_CARD_TEXT(card, 2, "ccwconfd");
   5100	ccw_device_get_id(cdev, &dev_id);
   5101
   5102	info->ddev_devno = dev_id.devno;
   5103	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
   5104			  !ccw_device_get_iid(cdev, &info->iid) &&
   5105			  !ccw_device_get_chid(cdev, 0, &info->chid);
   5106	info->ssid = dev_id.ssid;
   5107
   5108	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
   5109		 info->chid, info->chpid);
   5110
   5111	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
   5112	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
   5113	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
   5114	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
   5115	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
   5116	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
   5117	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
   5118}
   5119
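        /* Hand the previously allocated queues over to the qdio layer:
         * collect the SBAL arrays of all input queues (plus the completion
         * queue, if enabled) and output queues, then run qdio_allocate()
         * and qdio_establish() on the data device.
         */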
   5120static int qeth_qdio_establish(struct qeth_card *card)
   5121{
   5122	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
   5123	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
   5124	struct qeth_qib_parms *qib_parms = NULL;
   5125	struct qdio_initialize init_data;
   5126	unsigned int no_input_qs = 1;
   5127	unsigned int i;
   5128	int rc = 0;
   5129
   5130	QETH_CARD_TEXT(card, 2, "qdioest");
   5131
   5132	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
   5133		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
   5134		if (!qib_parms)
   5135			return -ENOMEM;
   5136
   5137		qeth_fill_qib_parms(card, qib_parms);
   5138	}
   5139
   5140	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
   5141	if (card->options.cq == QETH_CQ_ENABLED) {
   5142		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
   5143		no_input_qs++;
   5144	}
   5145
   5146	for (i = 0; i < card->qdio.no_out_queues; i++)
   5147		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
   5148
   5149	memset(&init_data, 0, sizeof(struct qdio_initialize));
   5150	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
   5151							  QDIO_QETH_QFMT;
   5152	init_data.qib_param_field_format = 0;
   5153	init_data.qib_param_field	 = (void *)qib_parms;
   5154	init_data.no_input_qs		 = no_input_qs;
   5155	init_data.no_output_qs           = card->qdio.no_out_queues;
   5156	init_data.input_handler		 = qeth_qdio_input_handler;
   5157	init_data.output_handler	 = qeth_qdio_output_handler;
   5158	init_data.irq_poll		 = qeth_qdio_poll;
   5159	init_data.int_parm               = (unsigned long) card;
   5160	init_data.input_sbal_addr_array  = in_sbal_ptrs;
   5161	init_data.output_sbal_addr_array = out_sbal_ptrs;
   5162
   5163	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
   5164		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
   5165		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
   5166				   init_data.no_output_qs);
   5167		if (rc) {
   5168			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
   5169			goto out;
   5170		}
   5171		rc = qdio_establish(CARD_DDEV(card), &init_data);
   5172		if (rc) {
   5173			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
   5174			qdio_free(CARD_DDEV(card));
   5175		}
   5176	}
   5177
   5178	switch (card->options.cq) {
   5179	case QETH_CQ_ENABLED:
    5180		dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
   5181		break;
   5182	case QETH_CQ_DISABLED:
    5183		dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
   5184		break;
   5185	default:
   5186		break;
   5187	}
   5188
   5189out:
   5190	kfree(qib_parms);
   5191	return rc;
   5192}
   5193
   5194static void qeth_core_free_card(struct qeth_card *card)
   5195{
   5196	QETH_CARD_TEXT(card, 2, "freecrd");
   5197
   5198	unregister_service_level(&card->qeth_service_level);
   5199	debugfs_remove_recursive(card->debugfs);
   5200	qeth_put_cmd(card->read_cmd);
   5201	destroy_workqueue(card->event_wq);
   5202	dev_set_drvdata(&card->gdev->dev, NULL);
   5203	kfree(card);
   5204}
   5205
   5206static void qeth_trace_features(struct qeth_card *card)
   5207{
   5208	QETH_CARD_TEXT(card, 2, "features");
   5209	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
   5210	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
   5211	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
   5212	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
   5213		      sizeof(card->info.diagass_support));
   5214}
   5215
   5216static struct ccw_device_id qeth_ids[] = {
   5217	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
   5218					.driver_info = QETH_CARD_TYPE_OSD},
   5219	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
   5220					.driver_info = QETH_CARD_TYPE_IQD},
   5221	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
   5222					.driver_info = QETH_CARD_TYPE_OSM},
   5223#ifdef CONFIG_QETH_OSX
   5224	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
   5225					.driver_info = QETH_CARD_TYPE_OSX},
   5226#endif
   5227	{},
   5228};
   5229MODULE_DEVICE_TABLE(ccw, qeth_ids);
   5230
   5231static struct ccw_driver qeth_ccw_driver = {
   5232	.driver = {
   5233		.owner = THIS_MODULE,
   5234		.name = "qeth",
   5235	},
   5236	.ids = qeth_ids,
   5237	.probe = ccwgroup_probe_ccwdev,
   5238	.remove = ccwgroup_remove_ccwdev,
   5239};
   5240
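        /* Bring the HW interface up from scratch: reset and restart all
         * three CCW channels, run the IDX handshake and MPC initialization,
         * then re-query the enabled IPA assists. Recoverable errors are
         * retried up to three times.
         */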
   5241static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
   5242{
   5243	int retries = 3;
   5244	int rc;
   5245
   5246	QETH_CARD_TEXT(card, 2, "hrdsetup");
   5247	atomic_set(&card->force_alloc_skb, 0);
   5248	rc = qeth_update_from_chp_desc(card);
   5249	if (rc)
   5250		return rc;
   5251retry:
   5252	if (retries < 3)
   5253		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
   5254				 CARD_DEVID(card));
   5255	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
   5256	qeth_stop_channel(&card->data);
   5257	qeth_stop_channel(&card->write);
   5258	qeth_stop_channel(&card->read);
   5259	qdio_free(CARD_DDEV(card));
   5260
   5261	rc = qeth_start_channel(&card->read);
   5262	if (rc)
   5263		goto retriable;
   5264	rc = qeth_start_channel(&card->write);
   5265	if (rc)
   5266		goto retriable;
   5267	rc = qeth_start_channel(&card->data);
   5268	if (rc)
   5269		goto retriable;
   5270retriable:
   5271	if (rc == -ERESTARTSYS) {
   5272		QETH_CARD_TEXT(card, 2, "break1");
   5273		return rc;
   5274	} else if (rc) {
   5275		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
   5276		if (--retries < 0)
   5277			goto out;
   5278		else
   5279			goto retry;
   5280	}
   5281
   5282	qeth_determine_capabilities(card);
   5283	qeth_read_ccw_conf_data(card);
   5284	qeth_idx_init(card);
   5285
   5286	rc = qeth_idx_activate_read_channel(card);
   5287	if (rc == -EINTR) {
   5288		QETH_CARD_TEXT(card, 2, "break2");
   5289		return rc;
   5290	} else if (rc) {
   5291		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
   5292		if (--retries < 0)
   5293			goto out;
   5294		else
   5295			goto retry;
   5296	}
   5297
   5298	rc = qeth_idx_activate_write_channel(card);
   5299	if (rc == -EINTR) {
   5300		QETH_CARD_TEXT(card, 2, "break3");
   5301		return rc;
   5302	} else if (rc) {
   5303		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
   5304		if (--retries < 0)
   5305			goto out;
   5306		else
   5307			goto retry;
   5308	}
   5309	card->read_or_write_problem = 0;
   5310	rc = qeth_mpc_initialize(card);
   5311	if (rc) {
   5312		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
   5313		goto out;
   5314	}
   5315
   5316	rc = qeth_send_startlan(card);
   5317	if (rc) {
   5318		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
   5319		if (rc == -ENETDOWN) {
   5320			dev_warn(&card->gdev->dev, "The LAN is offline\n");
   5321			*carrier_ok = false;
   5322		} else {
   5323			goto out;
   5324		}
   5325	} else {
   5326		*carrier_ok = true;
   5327	}
   5328
   5329	card->options.ipa4.supported = 0;
   5330	card->options.ipa6.supported = 0;
   5331	card->options.adp.supported = 0;
   5332	card->options.sbp.supported_funcs = 0;
   5333	card->info.diagass_support = 0;
   5334	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
   5335	if (rc == -ENOMEM)
   5336		goto out;
   5337	if (qeth_is_supported(card, IPA_IPV6)) {
   5338		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
   5339		if (rc == -ENOMEM)
   5340			goto out;
   5341	}
   5342	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
   5343		rc = qeth_query_setadapterparms(card);
   5344		if (rc < 0) {
   5345			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
   5346			goto out;
   5347		}
   5348	}
   5349	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
   5350		rc = qeth_query_setdiagass(card);
   5351		if (rc)
   5352			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
   5353	}
   5354
   5355	qeth_trace_features(card);
   5356
   5357	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
   5358	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
   5359		card->info.hwtrap = 0;
   5360
   5361	if (card->options.isolation != ISOLATION_MODE_NONE) {
   5362		rc = qeth_setadpparms_set_access_ctrl(card,
   5363						      card->options.isolation);
   5364		if (rc)
   5365			goto out;
   5366	}
   5367
   5368	qeth_init_link_info(card);
   5369
   5370	rc = qeth_init_qdio_queues(card);
   5371	if (rc) {
   5372		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
   5373		goto out;
   5374	}
   5375
   5376	return 0;
   5377out:
    5378	dev_warn(&card->gdev->dev,
    5379		 "The qeth device driver failed to recover an error on the device\n");
   5380	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
   5381			 CARD_DEVID(card), rc);
   5382	return rc;
   5383}
   5384
   5385static int qeth_set_online(struct qeth_card *card,
   5386			   const struct qeth_discipline *disc)
   5387{
   5388	bool carrier_ok;
   5389	int rc;
   5390
   5391	mutex_lock(&card->conf_mutex);
   5392	QETH_CARD_TEXT(card, 2, "setonlin");
   5393
   5394	rc = qeth_hardsetup_card(card, &carrier_ok);
   5395	if (rc) {
   5396		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
   5397		rc = -ENODEV;
   5398		goto err_hardsetup;
   5399	}
   5400
   5401	qeth_print_status_message(card);
   5402
   5403	if (card->dev->reg_state != NETREG_REGISTERED)
   5404		/* no need for locking / error handling at this early stage: */
   5405		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
   5406
   5407	rc = disc->set_online(card, carrier_ok);
   5408	if (rc)
   5409		goto err_online;
   5410
   5411	/* let user_space know that device is online */
   5412	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
   5413
   5414	mutex_unlock(&card->conf_mutex);
   5415	return 0;
   5416
   5417err_online:
   5418err_hardsetup:
   5419	qeth_qdio_clear_card(card, 0);
   5420	qeth_clear_working_pool_list(card);
   5421	qeth_flush_local_addrs(card);
   5422
   5423	qeth_stop_channel(&card->data);
   5424	qeth_stop_channel(&card->write);
   5425	qeth_stop_channel(&card->read);
   5426	qdio_free(CARD_DDEV(card));
   5427
   5428	mutex_unlock(&card->conf_mutex);
   5429	return rc;
   5430}
   5431
   5432int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
   5433		     bool resetting)
   5434{
   5435	int rc, rc2, rc3;
   5436
   5437	mutex_lock(&card->conf_mutex);
   5438	QETH_CARD_TEXT(card, 3, "setoffl");
   5439
   5440	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
   5441		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
   5442		card->info.hwtrap = 1;
   5443	}
   5444
   5445	/* cancel any stalled cmd that might block the rtnl: */
   5446	qeth_clear_ipacmd_list(card);
   5447
   5448	rtnl_lock();
   5449	card->info.open_when_online = card->dev->flags & IFF_UP;
   5450	dev_close(card->dev);
   5451	netif_device_detach(card->dev);
   5452	netif_carrier_off(card->dev);
   5453	rtnl_unlock();
   5454
   5455	cancel_work_sync(&card->rx_mode_work);
   5456
   5457	disc->set_offline(card);
   5458
   5459	qeth_qdio_clear_card(card, 0);
   5460	qeth_drain_output_queues(card);
   5461	qeth_clear_working_pool_list(card);
   5462	qeth_flush_local_addrs(card);
   5463	card->info.promisc_mode = 0;
   5464
   5465	rc  = qeth_stop_channel(&card->data);
   5466	rc2 = qeth_stop_channel(&card->write);
   5467	rc3 = qeth_stop_channel(&card->read);
   5468	if (!rc)
   5469		rc = (rc2) ? rc2 : rc3;
   5470	if (rc)
   5471		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
   5472	qdio_free(CARD_DDEV(card));
   5473
   5474	/* let user_space know that device is offline */
   5475	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
   5476
   5477	mutex_unlock(&card->conf_mutex);
   5478	return 0;
   5479}
   5480EXPORT_SYMBOL_GPL(qeth_set_offline);
   5481
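        /* Recovery thread: take the device offline and back online with
         * its current discipline. If that fails, leave the device offline.
         */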
   5482static int qeth_do_reset(void *data)
   5483{
   5484	const struct qeth_discipline *disc;
   5485	struct qeth_card *card = data;
   5486	int rc;
   5487
   5488	/* Lock-free, other users will block until we are done. */
   5489	disc = card->discipline;
   5490
   5491	QETH_CARD_TEXT(card, 2, "recover1");
   5492	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
   5493		return 0;
   5494	QETH_CARD_TEXT(card, 2, "recover2");
   5495	dev_warn(&card->gdev->dev,
   5496		 "A recovery process has been started for the device\n");
   5497
   5498	qeth_set_offline(card, disc, true);
   5499	rc = qeth_set_online(card, disc);
   5500	if (!rc) {
   5501		dev_info(&card->gdev->dev,
   5502			 "Device successfully recovered!\n");
   5503	} else {
   5504		qeth_set_offline(card, disc, true);
   5505		ccwgroup_set_offline(card->gdev, false);
   5506		dev_warn(&card->gdev->dev,
   5507			 "The qeth device driver failed to recover an error on the device\n");
   5508	}
   5509	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
   5510	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
   5511	return 0;
   5512}
   5513
   5514#if IS_ENABLED(CONFIG_QETH_L3)
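        /* Packets received in L3 mode carry no ethernet header, so fake one
         * up: pick the destination MAC based on the cast type and restore a
         * VLAN tag from the qeth L3 header, if any.
         */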
   5515static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
   5516				struct qeth_hdr *hdr)
   5517{
   5518	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
   5519	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
   5520	struct net_device *dev = skb->dev;
   5521
   5522	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
   5523		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
   5524				"FAKELL", skb->len);
   5525		return;
   5526	}
   5527
   5528	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
   5529		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
   5530							     ETH_P_IP;
   5531		unsigned char tg_addr[ETH_ALEN];
   5532
   5533		skb_reset_network_header(skb);
   5534		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
   5535		case QETH_CAST_MULTICAST:
   5536			if (prot == ETH_P_IP)
   5537				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
   5538			else
   5539				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
   5540			QETH_CARD_STAT_INC(card, rx_multicast);
   5541			break;
   5542		case QETH_CAST_BROADCAST:
   5543			ether_addr_copy(tg_addr, dev->broadcast);
   5544			QETH_CARD_STAT_INC(card, rx_multicast);
   5545			break;
   5546		default:
   5547			if (card->options.sniffer)
   5548				skb->pkt_type = PACKET_OTHERHOST;
   5549			ether_addr_copy(tg_addr, dev->dev_addr);
   5550		}
   5551
   5552		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
   5553			dev_hard_header(skb, dev, prot, tg_addr,
   5554					&l3_hdr->next_hop.rx.src_mac, skb->len);
   5555		else
   5556			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
   5557					skb->len);
   5558	}
   5559
   5560	/* copy VLAN tag from hdr into skb */
   5561	if (!card->options.sniffer &&
   5562	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
   5563				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
   5564		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
   5565				l3_hdr->vlan_id :
   5566				l3_hdr->next_hop.rx.vlan_id;
   5567
   5568		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
   5569	}
   5570}
   5571#endif
   5572
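        /* Final RX delivery: apply the HW checksum result, update the RX
         * statistics and pass the skb up through GRO.
         */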
   5573static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
   5574			     bool uses_frags, bool is_cso)
   5575{
   5576	struct napi_struct *napi = &card->napi;
   5577
   5578	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
   5579		skb->ip_summed = CHECKSUM_UNNECESSARY;
   5580		QETH_CARD_STAT_INC(card, rx_skb_csum);
   5581	} else {
   5582		skb->ip_summed = CHECKSUM_NONE;
   5583	}
   5584
   5585	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
   5586	QETH_CARD_STAT_INC(card, rx_packets);
   5587	if (skb_is_nonlinear(skb)) {
   5588		QETH_CARD_STAT_INC(card, rx_sg_skbs);
   5589		QETH_CARD_STAT_ADD(card, rx_sg_frags,
   5590				   skb_shinfo(skb)->nr_frags);
   5591	}
   5592
   5593	if (uses_frags) {
   5594		napi_gro_frags(napi);
   5595	} else {
   5596		skb->protocol = eth_type_trans(skb, skb->dev);
   5597		napi_gro_receive(napi, skb);
   5598	}
   5599}
   5600
   5601static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
   5602{
   5603	struct page *page = virt_to_page(data);
   5604	unsigned int next_frag;
   5605
   5606	next_frag = skb_shinfo(skb)->nr_frags;
   5607	get_page(page);
   5608	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
   5609			data_len);
   5610}
   5611
   5612static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
   5613{
   5614	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
   5615}
   5616
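        /* Extract one packet from an RX buffer, continuing at *element_no
         * and *__offset. Small packets are copied into a linear skb, larger
         * ones (or all of them, with the CQ enabled) are attached as page
         * fragments. Returns 0 on success, -ENODATA when the buffer is
         * exhausted, or another negative errno when the rest of the buffer
         * must be dropped.
         */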
   5617static int qeth_extract_skb(struct qeth_card *card,
   5618			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
   5619			    int *__offset)
   5620{
   5621	struct qeth_priv *priv = netdev_priv(card->dev);
   5622	struct qdio_buffer *buffer = qethbuffer->buffer;
   5623	struct napi_struct *napi = &card->napi;
   5624	struct qdio_buffer_element *element;
   5625	unsigned int linear_len = 0;
   5626	bool uses_frags = false;
   5627	int offset = *__offset;
   5628	bool use_rx_sg = false;
   5629	unsigned int headroom;
   5630	struct qeth_hdr *hdr;
   5631	struct sk_buff *skb;
   5632	int skb_len = 0;
   5633	bool is_cso;
   5634
   5635	element = &buffer->element[*element_no];
   5636
   5637next_packet:
   5638	/* qeth_hdr must not cross element boundaries */
   5639	while (element->length < offset + sizeof(struct qeth_hdr)) {
   5640		if (qeth_is_last_sbale(element))
   5641			return -ENODATA;
   5642		element++;
   5643		offset = 0;
   5644	}
   5645
   5646	hdr = phys_to_virt(element->addr) + offset;
   5647	offset += sizeof(*hdr);
   5648	skb = NULL;
   5649
   5650	switch (hdr->hdr.l2.id) {
   5651	case QETH_HEADER_TYPE_LAYER2:
   5652		skb_len = hdr->hdr.l2.pkt_length;
   5653		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
   5654
   5655		linear_len = ETH_HLEN;
   5656		headroom = 0;
   5657		break;
   5658	case QETH_HEADER_TYPE_LAYER3:
   5659		skb_len = hdr->hdr.l3.length;
   5660		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
   5661
   5662		if (!IS_LAYER3(card)) {
   5663			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
   5664			goto walk_packet;
   5665		}
   5666
   5667		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
   5668			linear_len = ETH_HLEN;
   5669			headroom = 0;
   5670			break;
   5671		}
   5672
   5673		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
   5674			linear_len = sizeof(struct ipv6hdr);
   5675		else
   5676			linear_len = sizeof(struct iphdr);
   5677		headroom = ETH_HLEN;
   5678		break;
   5679	default:
   5680		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
   5681			QETH_CARD_STAT_INC(card, rx_frame_errors);
   5682		else
   5683			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
   5684
   5685		/* Can't determine packet length, drop the whole buffer. */
   5686		return -EPROTONOSUPPORT;
   5687	}
   5688
   5689	if (skb_len < linear_len) {
   5690		QETH_CARD_STAT_INC(card, rx_dropped_runt);
   5691		goto walk_packet;
   5692	}
   5693
   5694	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
   5695		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
   5696		     !atomic_read(&card->force_alloc_skb));
   5697
   5698	if (use_rx_sg) {
   5699		/* QETH_CQ_ENABLED only: */
   5700		if (qethbuffer->rx_skb &&
   5701		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
   5702			skb = qethbuffer->rx_skb;
   5703			qethbuffer->rx_skb = NULL;
   5704			goto use_skb;
   5705		}
   5706
   5707		skb = napi_get_frags(napi);
   5708		if (!skb) {
   5709			/* -ENOMEM, no point in falling back further. */
   5710			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
   5711			goto walk_packet;
   5712		}
   5713
   5714		if (skb_tailroom(skb) >= linear_len + headroom) {
   5715			uses_frags = true;
   5716			goto use_skb;
   5717		}
   5718
   5719		netdev_info_once(card->dev,
   5720				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
   5721				 linear_len + headroom, skb_tailroom(skb));
   5722		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
   5723	}
   5724
   5725	linear_len = skb_len;
   5726	skb = napi_alloc_skb(napi, linear_len + headroom);
   5727	if (!skb) {
   5728		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
   5729		goto walk_packet;
   5730	}
   5731
   5732use_skb:
   5733	if (headroom)
   5734		skb_reserve(skb, headroom);
   5735walk_packet:
   5736	while (skb_len) {
   5737		int data_len = min(skb_len, (int)(element->length - offset));
   5738		char *data = phys_to_virt(element->addr) + offset;
   5739
   5740		skb_len -= data_len;
   5741		offset += data_len;
   5742
   5743		/* Extract data from current element: */
   5744		if (skb && data_len) {
   5745			if (linear_len) {
   5746				unsigned int copy_len;
   5747
   5748				copy_len = min_t(unsigned int, linear_len,
   5749						 data_len);
   5750
   5751				skb_put_data(skb, data, copy_len);
   5752				linear_len -= copy_len;
   5753				data_len -= copy_len;
   5754				data += copy_len;
   5755			}
   5756
   5757			if (data_len)
   5758				qeth_create_skb_frag(skb, data, data_len);
   5759		}
   5760
   5761		/* Step forward to next element: */
   5762		if (skb_len) {
   5763			if (qeth_is_last_sbale(element)) {
   5764				QETH_CARD_TEXT(card, 4, "unexeob");
   5765				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
   5766				if (skb) {
   5767					if (uses_frags)
   5768						napi_free_frags(napi);
   5769					else
   5770						kfree_skb(skb);
   5771					QETH_CARD_STAT_INC(card,
   5772							   rx_length_errors);
   5773				}
   5774				return -EMSGSIZE;
   5775			}
   5776			element++;
   5777			offset = 0;
   5778		}
   5779	}
   5780
   5781	/* This packet was skipped, go get another one: */
   5782	if (!skb)
   5783		goto next_packet;
   5784
   5785	*element_no = element - &buffer->element[0];
   5786	*__offset = offset;
   5787
   5788#if IS_ENABLED(CONFIG_QETH_L3)
   5789	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
   5790		qeth_l3_rebuild_skb(card, skb, hdr);
   5791#endif
   5792
   5793	qeth_receive_skb(card, skb, uses_frags, is_cso);
   5794	return 0;
   5795}
   5796
   5797static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
   5798				      struct qeth_qdio_buffer *buf, bool *done)
   5799{
   5800	unsigned int work_done = 0;
   5801
   5802	while (budget) {
   5803		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
   5804				     &card->rx.e_offset)) {
   5805			*done = true;
   5806			break;
   5807		}
   5808
   5809		work_done++;
   5810		budget--;
   5811	}
   5812
   5813	return work_done;
   5814}
   5815
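        /* NAPI RX processing: fetch completed input buffers from qdio,
         * extract their packets and hand fully drained buffers back to the
         * refill machinery. Returns the number of packets processed, never
         * more than @budget.
         */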
   5816static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
   5817{
   5818	struct qeth_rx *ctx = &card->rx;
   5819	unsigned int work_done = 0;
   5820
   5821	while (budget > 0) {
   5822		struct qeth_qdio_buffer *buffer;
   5823		unsigned int skbs_done = 0;
   5824		bool done = false;
   5825
   5826		/* Fetch completed RX buffers: */
   5827		if (!card->rx.b_count) {
   5828			card->rx.qdio_err = 0;
   5829			card->rx.b_count =
   5830				qdio_inspect_input_queue(CARD_DDEV(card), 0,
   5831							 &card->rx.b_index,
   5832							 &card->rx.qdio_err);
   5833			if (card->rx.b_count <= 0) {
   5834				card->rx.b_count = 0;
   5835				break;
   5836			}
   5837		}
   5838
   5839		/* Process one completed RX buffer: */
   5840		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
   5841		if (!(card->rx.qdio_err &&
   5842		      qeth_check_qdio_errors(card, buffer->buffer,
   5843					     card->rx.qdio_err, "qinerr")))
   5844			skbs_done = qeth_extract_skbs(card, budget, buffer,
   5845						      &done);
   5846		else
   5847			done = true;
   5848
   5849		work_done += skbs_done;
   5850		budget -= skbs_done;
   5851
   5852		if (done) {
   5853			QETH_CARD_STAT_INC(card, rx_bufs);
   5854			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
   5855			buffer->pool_entry = NULL;
   5856			card->rx.b_count--;
   5857			ctx->bufs_refill++;
   5858			ctx->bufs_refill -= qeth_rx_refill_queue(card,
   5859								 ctx->bufs_refill);
   5860
   5861			/* Step forward to next buffer: */
   5862			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
   5863			card->rx.buf_element = 0;
   5864			card->rx.e_offset = 0;
   5865		}
   5866	}
   5867
   5868	return work_done;
   5869}
   5870
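        /* Drain the completion queue (input queue 1) and feed any finished
         * QAOBs to qeth_qdio_cq_handler().
         */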
   5871static void qeth_cq_poll(struct qeth_card *card)
   5872{
   5873	unsigned int work_done = 0;
   5874
   5875	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
   5876		unsigned int start, error;
   5877		int completed;
   5878
   5879		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
   5880						     &error);
   5881		if (completed <= 0)
   5882			return;
   5883
   5884		qeth_qdio_cq_handler(card, error, 1, start, completed);
   5885		work_done += completed;
   5886	}
   5887}
   5888
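        /* Main NAPI poll handler: process RX, kick the per-queue TX NAPIs
         * when TX completion IRQs are in use, service the completion queue
         * and re-enable the qdio IRQ once all work is done.
         */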
   5889int qeth_poll(struct napi_struct *napi, int budget)
   5890{
   5891	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
   5892	unsigned int work_done;
   5893
   5894	work_done = qeth_rx_poll(card, budget);
   5895
   5896	if (qeth_use_tx_irqs(card)) {
   5897		struct qeth_qdio_out_q *queue;
   5898		unsigned int i;
   5899
   5900		qeth_for_each_output_queue(card, queue, i) {
   5901			if (!qeth_out_queue_is_empty(queue))
   5902				napi_schedule(&queue->napi);
   5903		}
   5904	}
   5905
   5906	if (card->options.cq == QETH_CQ_ENABLED)
   5907		qeth_cq_poll(card);
   5908
   5909	if (budget) {
   5910		struct qeth_rx *ctx = &card->rx;
   5911
   5912		/* Process any substantial refill backlog: */
   5913		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
   5914
   5915		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
   5916		if (work_done >= budget)
   5917			return work_done;
   5918	}
   5919
   5920	if (napi_complete_done(napi, work_done) &&
   5921	    qdio_start_irq(CARD_DDEV(card)))
   5922		napi_schedule(napi);
   5923
   5924	return work_done;
   5925}
   5926EXPORT_SYMBOL_GPL(qeth_poll);
   5927
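        /* TX completion for IQD: a buffer finished with
         * QDIO_ERROR_SLSB_PENDING is only preliminarily complete, the final
         * status arrives later through its QAOB. Park such buffers on
         * pending_bufs and re-arm their queue slot right away.
         */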
   5928static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
   5929				 unsigned int bidx, unsigned int qdio_error,
   5930				 int budget)
   5931{
   5932	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
   5933	u8 sflags = buffer->buffer->element[15].sflags;
   5934	struct qeth_card *card = queue->card;
   5935	bool error = !!qdio_error;
   5936
   5937	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
   5938		struct qaob *aob = buffer->aob;
   5939		struct qeth_qaob_priv1 *priv;
   5940		enum iucv_tx_notify notify;
   5941
   5942		if (!aob) {
   5943			netdev_WARN_ONCE(card->dev,
   5944					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
   5945					 bidx, queue->queue_no);
   5946			qeth_schedule_recovery(card);
   5947			return;
   5948		}
   5949
   5950		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
   5951
   5952		priv = (struct qeth_qaob_priv1 *)&aob->user1;
   5953		/* QAOB hasn't completed yet: */
   5954		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
   5955			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
   5956
   5957			/* Prepare the queue slot for immediate re-use: */
   5958			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
   5959			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
   5960				QETH_CARD_TEXT(card, 2, "outofbuf");
   5961				qeth_schedule_recovery(card);
   5962			}
   5963
   5964			list_add(&buffer->list_entry, &queue->pending_bufs);
   5965			/* Skip clearing the buffer: */
   5966			return;
   5967		}
   5968
   5969		/* QAOB already completed: */
   5970		notify = qeth_compute_cq_notification(aob->aorc, 0);
   5971		qeth_notify_skbs(queue, buffer, notify);
   5972		error = !!aob->aorc;
   5973		memset(aob, 0, sizeof(*aob));
   5974	} else if (card->options.cq == QETH_CQ_ENABLED) {
   5975		qeth_notify_skbs(queue, buffer,
   5976				 qeth_compute_cq_notification(sflags, 0));
   5977	}
   5978
   5979	qeth_clear_output_buffer(queue, buffer, error, budget);
   5980}
   5981
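        /* Per-queue TX NAPI poll: reap completed output buffers, update the
         * BQL accounting for IQD devices and wake the txq if xmit stopped
         * it while the queue was full.
         */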
   5982static int qeth_tx_poll(struct napi_struct *napi, int budget)
   5983{
   5984	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
   5985	unsigned int queue_no = queue->queue_no;
   5986	struct qeth_card *card = queue->card;
   5987	struct net_device *dev = card->dev;
   5988	unsigned int work_done = 0;
   5989	struct netdev_queue *txq;
   5990
   5991	if (IS_IQD(card))
   5992		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
   5993	else
   5994		txq = netdev_get_tx_queue(dev, queue_no);
   5995
   5996	while (1) {
   5997		unsigned int start, error, i;
   5998		unsigned int packets = 0;
   5999		unsigned int bytes = 0;
   6000		int completed;
   6001
   6002		qeth_tx_complete_pending_bufs(card, queue, false, budget);
   6003
   6004		if (qeth_out_queue_is_empty(queue)) {
   6005			napi_complete(napi);
   6006			return 0;
   6007		}
   6008
   6009		/* Give the CPU a breather: */
   6010		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
   6011			QETH_TXQ_STAT_INC(queue, completion_yield);
   6012			if (napi_complete_done(napi, 0))
   6013				napi_schedule(napi);
   6014			return 0;
   6015		}
   6016
   6017		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
   6018						      &start, &error);
   6019		if (completed <= 0) {
   6020			/* Ensure we see TX completion for pending work: */
   6021			if (napi_complete_done(napi, 0) &&
   6022			    !atomic_read(&queue->set_pci_flags_count))
   6023				qeth_tx_arm_timer(queue, queue->rescan_usecs);
   6024			return 0;
   6025		}
   6026
   6027		for (i = start; i < start + completed; i++) {
   6028			struct qeth_qdio_out_buffer *buffer;
   6029			unsigned int bidx = QDIO_BUFNR(i);
   6030
   6031			buffer = queue->bufs[bidx];
   6032			packets += buffer->frames;
   6033			bytes += buffer->bytes;
   6034
   6035			qeth_handle_send_error(card, buffer, error);
   6036			if (IS_IQD(card))
   6037				qeth_iqd_tx_complete(queue, bidx, error, budget);
   6038			else
   6039				qeth_clear_output_buffer(queue, buffer, error,
   6040							 budget);
   6041		}
   6042
   6043		atomic_sub(completed, &queue->used_buffers);
   6044		work_done += completed;
   6045		if (IS_IQD(card))
   6046			netdev_tx_completed_queue(txq, packets, bytes);
   6047		else
   6048			qeth_check_outbound_queue(queue);
   6049
    6050		/* xmit may have observed the full-condition, but not yet
    6051		 * stopped the txq; in that case the wake-up below won't
    6052		 * trigger. To cover this, xmit re-checks the txq's fill
    6053		 * level before it returns and wakes the queue if needed.
    6054		 */
   6055		if (netif_tx_queue_stopped(txq) &&
   6056		    !qeth_out_queue_is_full(queue))
   6057			netif_tx_wake_queue(txq);
   6058	}
   6059}
   6060
   6061static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
   6062{
   6063	if (!cmd->hdr.return_code)
   6064		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
   6065	return cmd->hdr.return_code;
   6066}
   6067
   6068static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
   6069					struct qeth_reply *reply,
   6070					unsigned long data)
   6071{
   6072	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   6073	struct qeth_ipa_caps *caps = reply->param;
   6074
   6075	if (qeth_setassparms_inspect_rc(cmd))
   6076		return -EIO;
   6077
   6078	caps->supported = cmd->data.setassparms.data.caps.supported;
   6079	caps->enabled = cmd->data.setassparms.data.caps.enabled;
   6080	return 0;
   6081}
   6082
   6083int qeth_setassparms_cb(struct qeth_card *card,
   6084			struct qeth_reply *reply, unsigned long data)
   6085{
   6086	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   6087
   6088	QETH_CARD_TEXT(card, 4, "defadpcb");
   6089
   6090	if (cmd->hdr.return_code)
   6091		return -EIO;
   6092
   6093	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
   6094	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
   6095		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
   6096	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
   6097		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
   6098	return 0;
   6099}
   6100EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
   6101
   6102struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
   6103						 enum qeth_ipa_funcs ipa_func,
   6104						 u16 cmd_code,
   6105						 unsigned int data_length,
   6106						 enum qeth_prot_versions prot)
   6107{
   6108	struct qeth_ipacmd_setassparms *setassparms;
   6109	struct qeth_ipacmd_setassparms_hdr *hdr;
   6110	struct qeth_cmd_buffer *iob;
   6111
   6112	QETH_CARD_TEXT(card, 4, "getasscm");
   6113	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
   6114				 data_length +
   6115				 offsetof(struct qeth_ipacmd_setassparms,
   6116					  data));
   6117	if (!iob)
   6118		return NULL;
   6119
   6120	setassparms = &__ipa_cmd(iob)->data.setassparms;
   6121	setassparms->assist_no = ipa_func;
   6122
   6123	hdr = &setassparms->hdr;
   6124	hdr->length = sizeof(*hdr) + data_length;
   6125	hdr->command_code = cmd_code;
   6126	return iob;
   6127}
   6128EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
   6129
   6130int qeth_send_simple_setassparms_prot(struct qeth_card *card,
   6131				      enum qeth_ipa_funcs ipa_func,
   6132				      u16 cmd_code, u32 *data,
   6133				      enum qeth_prot_versions prot)
   6134{
   6135	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
   6136	struct qeth_cmd_buffer *iob;
   6137
   6138	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
   6139	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
   6140	if (!iob)
   6141		return -ENOMEM;
   6142
   6143	if (data)
   6144		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
   6145	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
   6146}
   6147EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
   6148
   6149static void qeth_unregister_dbf_views(void)
   6150{
   6151	int x;
   6152
   6153	for (x = 0; x < QETH_DBF_INFOS; x++) {
   6154		debug_unregister(qeth_dbf[x].id);
   6155		qeth_dbf[x].id = NULL;
   6156	}
   6157}
   6158
   6159void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
   6160{
   6161	char dbf_txt_buf[32];
   6162	va_list args;
   6163
   6164	if (!debug_level_enabled(id, level))
   6165		return;
   6166	va_start(args, fmt);
   6167	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
   6168	va_end(args);
   6169	debug_text_event(id, level, dbf_txt_buf);
   6170}
   6171EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
   6172
   6173static int qeth_register_dbf_views(void)
   6174{
   6175	int ret;
   6176	int x;
   6177
   6178	for (x = 0; x < QETH_DBF_INFOS; x++) {
   6179		/* register the areas */
   6180		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
   6181						qeth_dbf[x].pages,
   6182						qeth_dbf[x].areas,
   6183						qeth_dbf[x].len);
   6184		if (qeth_dbf[x].id == NULL) {
   6185			qeth_unregister_dbf_views();
   6186			return -ENOMEM;
   6187		}
   6188
   6189		/* register a view */
   6190		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
   6191		if (ret) {
   6192			qeth_unregister_dbf_views();
   6193			return ret;
   6194		}
   6195
   6196		/* set a passing level */
   6197		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
   6198	}
   6199
   6200	return 0;
   6201}
   6202
   6203static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */
   6204
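        /* Attach the requested discipline (qeth_l2 or qeth_l3) to the
         * card, loading the corresponding module first if necessary.
         */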
   6205int qeth_setup_discipline(struct qeth_card *card,
   6206			  enum qeth_discipline_id discipline)
   6207{
   6208	int rc;
   6209
   6210	mutex_lock(&qeth_mod_mutex);
   6211	switch (discipline) {
   6212	case QETH_DISCIPLINE_LAYER3:
   6213		card->discipline = try_then_request_module(
   6214			symbol_get(qeth_l3_discipline), "qeth_l3");
   6215		break;
   6216	case QETH_DISCIPLINE_LAYER2:
   6217		card->discipline = try_then_request_module(
   6218			symbol_get(qeth_l2_discipline), "qeth_l2");
   6219		break;
   6220	default:
   6221		break;
   6222	}
   6223	mutex_unlock(&qeth_mod_mutex);
   6224
   6225	if (!card->discipline) {
    6226		dev_err(&card->gdev->dev,
    6227			"There is no kernel module to support discipline %d\n", discipline);
   6228		return -EINVAL;
   6229	}
   6230
   6231	rc = card->discipline->setup(card->gdev);
   6232	if (rc) {
   6233		if (discipline == QETH_DISCIPLINE_LAYER2)
   6234			symbol_put(qeth_l2_discipline);
   6235		else
   6236			symbol_put(qeth_l3_discipline);
   6237		card->discipline = NULL;
   6238
   6239		return rc;
   6240	}
   6241
   6242	card->options.layer = discipline;
   6243	return 0;
   6244}
   6245
   6246void qeth_remove_discipline(struct qeth_card *card)
   6247{
   6248	card->discipline->remove(card->gdev);
   6249
   6250	if (IS_LAYER2(card))
   6251		symbol_put(qeth_l2_discipline);
   6252	else
   6253		symbol_put(qeth_l3_discipline);
   6254	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
   6255	card->discipline = NULL;
   6256}
   6257
   6258static const struct device_type qeth_generic_devtype = {
   6259	.name = "qeth_generic",
   6260};
   6261
   6262#define DBF_NAME_LEN	20
   6263
   6264struct qeth_dbf_entry {
   6265	char dbf_name[DBF_NAME_LEN];
   6266	debug_info_t *dbf_info;
   6267	struct list_head dbf_list;
   6268};
   6269
   6270static LIST_HEAD(qeth_dbf_list);
   6271static DEFINE_MUTEX(qeth_dbf_list_mutex);
   6272
   6273static debug_info_t *qeth_get_dbf_entry(char *name)
   6274{
   6275	struct qeth_dbf_entry *entry;
   6276	debug_info_t *rc = NULL;
   6277
   6278	mutex_lock(&qeth_dbf_list_mutex);
   6279	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
   6280		if (strcmp(entry->dbf_name, name) == 0) {
   6281			rc = entry->dbf_info;
   6282			break;
   6283		}
   6284	}
   6285	mutex_unlock(&qeth_dbf_list_mutex);
   6286	return rc;
   6287}
   6288
   6289static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
   6290{
   6291	struct qeth_dbf_entry *new_entry;
   6292
   6293	card->debug = debug_register(name, 2, 1, 8);
   6294	if (!card->debug) {
   6295		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
   6296		goto err;
   6297	}
   6298	if (debug_register_view(card->debug, &debug_hex_ascii_view))
   6299		goto err_dbg;
   6300	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
   6301	if (!new_entry)
   6302		goto err_dbg;
    6303	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
   6304	new_entry->dbf_info = card->debug;
   6305	mutex_lock(&qeth_dbf_list_mutex);
   6306	list_add(&new_entry->dbf_list, &qeth_dbf_list);
   6307	mutex_unlock(&qeth_dbf_list_mutex);
   6308
   6309	return 0;
   6310
   6311err_dbg:
   6312	debug_unregister(card->debug);
   6313err:
   6314	return -ENOMEM;
   6315}
   6316
   6317static void qeth_clear_dbf_list(void)
   6318{
   6319	struct qeth_dbf_entry *entry, *tmp;
   6320
   6321	mutex_lock(&qeth_dbf_list_mutex);
   6322	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
   6323		list_del(&entry->dbf_list);
   6324		debug_unregister(entry->dbf_info);
   6325		kfree(entry);
   6326	}
   6327	mutex_unlock(&qeth_dbf_list_mutex);
   6328}
   6329
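        /* Allocate the netdev for a card: IQD devices get a multi-queue
         * "hsi%d" device, OSM a single-queue ethernet device, and all other
         * types a multi-queue ethernet device.
         */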
   6330static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
   6331{
   6332	struct net_device *dev;
   6333	struct qeth_priv *priv;
   6334
   6335	switch (card->info.type) {
   6336	case QETH_CARD_TYPE_IQD:
   6337		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
   6338				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
   6339		break;
   6340	case QETH_CARD_TYPE_OSM:
   6341		dev = alloc_etherdev(sizeof(*priv));
   6342		break;
   6343	default:
   6344		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
   6345	}
   6346
   6347	if (!dev)
   6348		return NULL;
   6349
   6350	priv = netdev_priv(dev);
   6351	priv->rx_copybreak = QETH_RX_COPYBREAK;
   6352	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
   6353
   6354	dev->ml_priv = card;
   6355	dev->watchdog_timeo = QETH_TX_TIMEOUT;
   6356	dev->min_mtu = 576;
   6357	 /* initialized when device first goes online: */
   6358	dev->max_mtu = 0;
   6359	dev->mtu = 0;
   6360	SET_NETDEV_DEV(dev, &card->gdev->dev);
   6361	netif_carrier_off(dev);
   6362
   6363	dev->ethtool_ops = &qeth_ethtool_ops;
   6364	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
   6365	dev->hw_features |= NETIF_F_SG;
   6366	dev->vlan_features |= NETIF_F_SG;
   6367	if (IS_IQD(card))
   6368		dev->features |= NETIF_F_SG;
   6369
   6370	return dev;
   6371}
   6372
   6373struct net_device *qeth_clone_netdev(struct net_device *orig)
   6374{
   6375	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
   6376
   6377	if (!clone)
   6378		return NULL;
   6379
   6380	clone->dev_port = orig->dev_port;
   6381	return clone;
   6382}
   6383
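        /* ccwgroup probe: allocate the card with its debug entry, netdev
         * and RX queue; if the device type enforces a discipline, set it
         * up immediately.
         */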
   6384static int qeth_core_probe_device(struct ccwgroup_device *gdev)
   6385{
   6386	struct qeth_card *card;
   6387	struct device *dev;
   6388	int rc;
   6389	enum qeth_discipline_id enforced_disc;
   6390	char dbf_name[DBF_NAME_LEN];
   6391
   6392	QETH_DBF_TEXT(SETUP, 2, "probedev");
   6393
   6394	dev = &gdev->dev;
   6395	if (!get_device(dev))
   6396		return -ENODEV;
   6397
   6398	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
   6399
   6400	card = qeth_alloc_card(gdev);
   6401	if (!card) {
   6402		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
   6403		rc = -ENOMEM;
   6404		goto err_dev;
   6405	}
   6406
   6407	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
   6408		dev_name(&gdev->dev));
   6409	card->debug = qeth_get_dbf_entry(dbf_name);
   6410	if (!card->debug) {
   6411		rc = qeth_add_dbf_entry(card, dbf_name);
   6412		if (rc)
   6413			goto err_card;
   6414	}
   6415
   6416	qeth_setup_card(card);
   6417	card->dev = qeth_alloc_netdev(card);
   6418	if (!card->dev) {
   6419		rc = -ENOMEM;
   6420		goto err_card;
   6421	}
   6422
   6423	qeth_determine_capabilities(card);
   6424	qeth_set_blkt_defaults(card);
   6425
   6426	card->qdio.in_q = qeth_alloc_qdio_queue();
   6427	if (!card->qdio.in_q) {
   6428		rc = -ENOMEM;
   6429		goto err_rx_queue;
   6430	}
   6431
   6432	card->qdio.no_out_queues = card->dev->num_tx_queues;
   6433	rc = qeth_update_from_chp_desc(card);
   6434	if (rc)
   6435		goto err_chp_desc;
   6436
   6437	gdev->dev.groups = qeth_dev_groups;
   6438
   6439	enforced_disc = qeth_enforce_discipline(card);
   6440	switch (enforced_disc) {
   6441	case QETH_DISCIPLINE_UNDETERMINED:
   6442		gdev->dev.type = &qeth_generic_devtype;
   6443		break;
   6444	default:
   6445		card->info.layer_enforced = true;
   6446		/* It's so early that we don't need the discipline_mutex yet. */
   6447		rc = qeth_setup_discipline(card, enforced_disc);
   6448		if (rc)
   6449			goto err_setup_disc;
   6450
   6451		break;
   6452	}
   6453
   6454	return 0;
   6455
   6456err_setup_disc:
   6457err_chp_desc:
   6458	qeth_free_qdio_queue(card->qdio.in_q);
   6459err_rx_queue:
   6460	free_netdev(card->dev);
   6461err_card:
   6462	qeth_core_free_card(card);
   6463err_dev:
   6464	put_device(dev);
   6465	return rc;
   6466}
   6467
   6468static void qeth_core_remove_device(struct ccwgroup_device *gdev)
   6469{
   6470	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
   6471
   6472	QETH_CARD_TEXT(card, 2, "removedv");
   6473
   6474	mutex_lock(&card->discipline_mutex);
   6475	if (card->discipline)
   6476		qeth_remove_discipline(card);
   6477	mutex_unlock(&card->discipline_mutex);
   6478
   6479	qeth_free_qdio_queues(card);
   6480
   6481	qeth_free_qdio_queue(card->qdio.in_q);
   6482	free_netdev(card->dev);
   6483	qeth_core_free_card(card);
   6484	put_device(&gdev->dev);
   6485}
   6486
   6487static int qeth_core_set_online(struct ccwgroup_device *gdev)
   6488{
   6489	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
   6490	int rc = 0;
   6491	enum qeth_discipline_id def_discipline;
   6492
   6493	mutex_lock(&card->discipline_mutex);
   6494	if (!card->discipline) {
   6495		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
   6496						QETH_DISCIPLINE_LAYER2;
   6497		rc = qeth_setup_discipline(card, def_discipline);
   6498		if (rc)
   6499			goto err;
   6500	}
   6501
   6502	rc = qeth_set_online(card, card->discipline);
   6503
   6504err:
   6505	mutex_unlock(&card->discipline_mutex);
   6506	return rc;
   6507}
   6508
   6509static int qeth_core_set_offline(struct ccwgroup_device *gdev)
   6510{
   6511	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
   6512	int rc;
   6513
   6514	mutex_lock(&card->discipline_mutex);
   6515	rc = qeth_set_offline(card, card->discipline, false);
   6516	mutex_unlock(&card->discipline_mutex);
   6517
   6518	return rc;
   6519}
   6520
   6521static void qeth_core_shutdown(struct ccwgroup_device *gdev)
   6522{
   6523	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
   6524
   6525	qeth_set_allowed_threads(card, 0, 1);
   6526	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
   6527		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
   6528	qeth_qdio_clear_card(card, 0);
   6529	qeth_drain_output_queues(card);
   6530	qdio_free(CARD_DDEV(card));
   6531}
   6532
   6533static ssize_t group_store(struct device_driver *ddrv, const char *buf,
   6534			   size_t count)
   6535{
   6536	int err;
   6537
   6538	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
   6539				  buf);
   6540
   6541	return err ? err : count;
   6542}
   6543static DRIVER_ATTR_WO(group);
   6544
   6545static struct attribute *qeth_drv_attrs[] = {
   6546	&driver_attr_group.attr,
   6547	NULL,
   6548};
   6549static struct attribute_group qeth_drv_attr_group = {
   6550	.attrs = qeth_drv_attrs,
   6551};
   6552static const struct attribute_group *qeth_drv_attr_groups[] = {
   6553	&qeth_drv_attr_group,
   6554	NULL,
   6555};
   6556
   6557static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
   6558	.driver = {
   6559		.groups = qeth_drv_attr_groups,
   6560		.owner = THIS_MODULE,
   6561		.name = "qeth",
   6562	},
   6563	.ccw_driver = &qeth_ccw_driver,
   6564	.setup = qeth_core_probe_device,
   6565	.remove = qeth_core_remove_device,
   6566	.set_online = qeth_core_set_online,
   6567	.set_offline = qeth_core_set_offline,
   6568	.shutdown = qeth_core_shutdown,
   6569};
   6570
   6571int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
   6572{
   6573	struct qeth_card *card = dev->ml_priv;
   6574	int rc = 0;
   6575
   6576	switch (cmd) {
   6577	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
   6578		rc = qeth_snmp_command(card, data);
   6579		break;
   6580	case SIOC_QETH_GET_CARD_TYPE:
   6581		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
   6582		    !IS_VM_NIC(card))
   6583			return 1;
   6584		return 0;
   6585	case SIOC_QETH_QUERY_OAT:
   6586		rc = qeth_query_oat_command(card, data);
   6587		break;
   6588	default:
   6589		rc = -EOPNOTSUPP;
   6590	}
   6591	if (rc)
   6592		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
   6593	return rc;
   6594}
   6595EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
   6596
   6597int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
   6598{
   6599	struct qeth_card *card = dev->ml_priv;
   6600	struct mii_ioctl_data *mii_data;
   6601	int rc = 0;
   6602
   6603	switch (cmd) {
   6604	case SIOCGMIIPHY:
   6605		mii_data = if_mii(rq);
   6606		mii_data->phy_id = 0;
   6607		break;
   6608	case SIOCGMIIREG:
   6609		mii_data = if_mii(rq);
   6610		if (mii_data->phy_id != 0)
   6611			rc = -EINVAL;
   6612		else
   6613			mii_data->val_out = qeth_mdio_read(dev,
   6614				mii_data->phy_id, mii_data->reg_num);
   6615		break;
   6616	default:
   6617		return -EOPNOTSUPP;
   6618	}
   6619	if (rc)
   6620		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
   6621	return rc;
   6622}
   6623EXPORT_SYMBOL_GPL(qeth_do_ioctl);
   6624
   6625static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
   6626			      unsigned long data)
   6627{
   6628	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   6629	u32 *features = reply->param;
   6630
   6631	if (qeth_setassparms_inspect_rc(cmd))
   6632		return -EIO;
   6633
   6634	*features = cmd->data.setassparms.data.flags_32bit;
   6635	return 0;
   6636}
   6637
   6638static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
   6639			     enum qeth_prot_versions prot)
   6640{
   6641	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
   6642						 NULL, prot);
   6643}
   6644
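        /* Enable HW checksum offload with the usual two-step IPA sequence:
         * ASS_START reports which checksum features the device supports,
         * ASS_ENABLE then activates the required subset. On any failure,
         * roll back to the offload-disabled state.
         */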
   6645static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
   6646			    enum qeth_prot_versions prot, u8 *lp2lp)
   6647{
   6648	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
   6649	struct qeth_cmd_buffer *iob;
   6650	struct qeth_ipa_caps caps;
   6651	u32 features;
   6652	int rc;
   6653
   6654	/* some L3 HW requires combined L3+L4 csum offload: */
   6655	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
   6656	    cstype == IPA_OUTBOUND_CHECKSUM)
   6657		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
   6658
   6659	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
   6660				       prot);
   6661	if (!iob)
   6662		return -ENOMEM;
   6663
   6664	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
   6665	if (rc)
   6666		return rc;
   6667
   6668	if ((required_features & features) != required_features) {
   6669		qeth_set_csum_off(card, cstype, prot);
   6670		return -EOPNOTSUPP;
   6671	}
   6672
   6673	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
   6674				       SETASS_DATA_SIZEOF(flags_32bit),
   6675				       prot);
   6676	if (!iob) {
   6677		qeth_set_csum_off(card, cstype, prot);
   6678		return -ENOMEM;
   6679	}
   6680
   6681	if (features & QETH_IPA_CHECKSUM_LP2LP)
   6682		required_features |= QETH_IPA_CHECKSUM_LP2LP;
   6683	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
   6684	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
   6685	if (rc) {
   6686		qeth_set_csum_off(card, cstype, prot);
   6687		return rc;
   6688	}
   6689
   6690	if (!qeth_ipa_caps_supported(&caps, required_features) ||
   6691	    !qeth_ipa_caps_enabled(&caps, required_features)) {
   6692		qeth_set_csum_off(card, cstype, prot);
   6693		return -EOPNOTSUPP;
   6694	}
   6695
   6696	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
   6697		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
   6698
   6699	if (lp2lp)
   6700		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
   6701
   6702	return 0;
   6703}
   6704
   6705static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
   6706			     enum qeth_prot_versions prot, u8 *lp2lp)
   6707{
   6708	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
   6709		    qeth_set_csum_off(card, cstype, prot);
   6710}
   6711
   6712static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
   6713			     unsigned long data)
   6714{
   6715	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
   6716	struct qeth_tso_start_data *tso_data = reply->param;
   6717
   6718	if (qeth_setassparms_inspect_rc(cmd))
   6719		return -EIO;
   6720
   6721	tso_data->mss = cmd->data.setassparms.data.tso.mss;
   6722	tso_data->supported = cmd->data.setassparms.data.tso.supported;
   6723	return 0;
   6724}
   6725
   6726static int qeth_set_tso_off(struct qeth_card *card,
   6727			    enum qeth_prot_versions prot)
   6728{
   6729	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
   6730						 IPA_CMD_ASS_STOP, NULL, prot);
   6731}
   6732
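        /* Same ASS_START / ASS_ENABLE sequence as for checksum offload,
         * but for TSO. Also checks that the device reported a usable MSS.
         */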
   6733static int qeth_set_tso_on(struct qeth_card *card,
   6734			   enum qeth_prot_versions prot)
   6735{
   6736	struct qeth_tso_start_data tso_data;
   6737	struct qeth_cmd_buffer *iob;
   6738	struct qeth_ipa_caps caps;
   6739	int rc;
   6740
   6741	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
   6742				       IPA_CMD_ASS_START, 0, prot);
   6743	if (!iob)
   6744		return -ENOMEM;
   6745
   6746	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
   6747	if (rc)
   6748		return rc;
   6749
   6750	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
   6751		qeth_set_tso_off(card, prot);
   6752		return -EOPNOTSUPP;
   6753	}
   6754
   6755	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
   6756				       IPA_CMD_ASS_ENABLE,
   6757				       SETASS_DATA_SIZEOF(caps), prot);
   6758	if (!iob) {
   6759		qeth_set_tso_off(card, prot);
   6760		return -ENOMEM;
   6761	}
   6762
   6763	/* enable TSO capability */
   6764	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
   6765		QETH_IPA_LARGE_SEND_TCP;
   6766	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
   6767	if (rc) {
   6768		qeth_set_tso_off(card, prot);
   6769		return rc;
   6770	}
   6771
   6772	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
   6773	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
   6774		qeth_set_tso_off(card, prot);
   6775		return -EOPNOTSUPP;
   6776	}
   6777
   6778	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
   6779		 tso_data.mss);
   6780	return 0;
   6781}
   6782
   6783static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
   6784			    enum qeth_prot_versions prot)
   6785{
   6786	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
   6787}
   6788
   6789static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
   6790{
   6791	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
   6792	int rc_ipv6;
   6793
   6794	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
   6795		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
   6796					    QETH_PROT_IPV4, NULL);
   6797	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
   6798		/* no IPv6 Assist available, so rc_ipv4 alone is the final result */
   6799		return rc_ipv4;
   6800
   6801	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
   6802				    QETH_PROT_IPV6, NULL);
   6803
   6804	if (on)
   6805		/* enable: success if any Assist is active */
   6806		return (rc_ipv6) ? rc_ipv4 : 0;
   6807
   6808	/* disable: failure if any Assist is still active */
   6809	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
   6810}
   6811
   6812/**
   6813 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
   6814 * @dev:	a net_device
   6815 */
   6816void qeth_enable_hw_features(struct net_device *dev)
   6817{
   6818	struct qeth_card *card = dev->ml_priv;
   6819	netdev_features_t features;
   6820
   6821	features = dev->features;
   6822	/* force-off any feature that might need an IPA sequence.
   6823	 * netdev_update_features() will restart them.
   6824	 */
   6825	dev->features &= ~dev->hw_features;
   6826	/* toggle VLAN filter, so that VIDs are re-programmed: */
   6827	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
   6828		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
   6829		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
   6830	}
   6831	netdev_update_features(dev);
   6832	if (features != dev->features)
   6833		dev_warn(&card->gdev->dev,
   6834			 "Device recovery failed to restore all offload features\n");
   6835}
   6836EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
   6837
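       	/* Without LP2LP checksum support, CSO/TSO are restricted for
       	 * traffic to local next hops. Once no such feature is active
       	 * anymore for an IP version, the cached local addresses for that
       	 * version can be flushed.
       	 */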
   6838static void qeth_check_restricted_features(struct qeth_card *card,
   6839					   netdev_features_t changed,
   6840					   netdev_features_t actual)
   6841{
   6842	netdev_features_t ipv6_features = NETIF_F_TSO6;
   6843	netdev_features_t ipv4_features = NETIF_F_TSO;
   6844
   6845	if (!card->info.has_lp2lp_cso_v6)
   6846		ipv6_features |= NETIF_F_IPV6_CSUM;
   6847	if (!card->info.has_lp2lp_cso_v4)
   6848		ipv4_features |= NETIF_F_IP_CSUM;
   6849
   6850	if ((changed & ipv6_features) && !(actual & ipv6_features))
   6851		qeth_flush_local_addrs6(card);
   6852	if ((changed & ipv4_features) && !(actual & ipv4_features))
   6853		qeth_flush_local_addrs4(card);
   6854}
   6855
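       	/* Toggle the HW Assists behind the changed feature bits. 'changed'
       	 * starts out as the requested delta; each failed toggle clears its
       	 * bit, so in the end it holds the delta that actually took effect.
       	 */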
   6856int qeth_set_features(struct net_device *dev, netdev_features_t features)
   6857{
   6858	struct qeth_card *card = dev->ml_priv;
   6859	netdev_features_t changed = dev->features ^ features;
   6860	int rc = 0;
   6861
   6862	QETH_CARD_TEXT(card, 2, "setfeat");
   6863	QETH_CARD_HEX(card, 2, &features, sizeof(features));
   6864
   6865	if ((changed & NETIF_F_IP_CSUM)) {
   6866		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
   6867				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
   6868				       &card->info.has_lp2lp_cso_v4);
   6869		if (rc)
   6870			changed ^= NETIF_F_IP_CSUM;
   6871	}
   6872	if (changed & NETIF_F_IPV6_CSUM) {
   6873		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
   6874				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
   6875				       &card->info.has_lp2lp_cso_v6);
   6876		if (rc)
   6877			changed ^= NETIF_F_IPV6_CSUM;
   6878	}
   6879	if (changed & NETIF_F_RXCSUM) {
   6880		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
   6881		if (rc)
   6882			changed ^= NETIF_F_RXCSUM;
   6883	}
   6884	if (changed & NETIF_F_TSO) {
   6885		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
   6886				      QETH_PROT_IPV4);
   6887		if (rc)
   6888			changed ^= NETIF_F_TSO;
   6889	}
   6890	if (changed & NETIF_F_TSO6) {
   6891		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
   6892				      QETH_PROT_IPV6);
   6893		if (rc)
   6894			changed ^= NETIF_F_TSO6;
   6895	}
   6896
   6897	qeth_check_restricted_features(card, dev->features ^ features,
   6898				       dev->features ^ changed);
   6899
   6900	/* everything changed successfully? */
   6901	if ((dev->features ^ features) == changed)
   6902		return 0;
   6903		/* something went wrong: commit only the successfully changed bits
   6904		 * and return an error
   6905		 */
   6904	dev->features ^= changed;
   6905	return -EIO;
   6906}
   6907EXPORT_SYMBOL_GPL(qeth_set_features);
   6908
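       	/* Mask out feature bits whose Assist the adapter doesn't support,
       	 * so that the stack never requests them.
       	 */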
   6909netdev_features_t qeth_fix_features(struct net_device *dev,
   6910				    netdev_features_t features)
   6911{
   6912	struct qeth_card *card = dev->ml_priv;
   6913
   6914	QETH_CARD_TEXT(card, 2, "fixfeat");
   6915	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
   6916		features &= ~NETIF_F_IP_CSUM;
   6917	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
   6918		features &= ~NETIF_F_IPV6_CSUM;
   6919	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
   6920	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
   6921		features &= ~NETIF_F_RXCSUM;
   6922	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
   6923		features &= ~NETIF_F_TSO;
   6924	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
   6925		features &= ~NETIF_F_TSO6;
   6926
   6927	QETH_CARD_HEX(card, 2, &features, sizeof(features));
   6928	return features;
   6929}
   6930EXPORT_SYMBOL_GPL(qeth_fix_features);
   6931
   6932netdev_features_t qeth_features_check(struct sk_buff *skb,
   6933				      struct net_device *dev,
   6934				      netdev_features_t features)
   6935{
   6936	struct qeth_card *card = dev->ml_priv;
   6937
   6938	/* Traffic with local next-hop is not eligible for some offloads: */
   6939	if (skb->ip_summed == CHECKSUM_PARTIAL &&
   6940	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
   6941		netdev_features_t restricted = 0;
   6942
   6943		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
   6944			restricted |= NETIF_F_ALL_TSO;
   6945
   6946		switch (vlan_get_protocol(skb)) {
   6947		case htons(ETH_P_IP):
   6948			if (!card->info.has_lp2lp_cso_v4)
   6949				restricted |= NETIF_F_IP_CSUM;
   6950
   6951			if (restricted && qeth_next_hop_is_local_v4(card, skb))
   6952				features &= ~restricted;
   6953			break;
   6954		case htons(ETH_P_IPV6):
   6955			if (!card->info.has_lp2lp_cso_v6)
   6956				restricted |= NETIF_F_IPV6_CSUM;
   6957
   6958			if (restricted && qeth_next_hop_is_local_v6(card, skb))
   6959				features &= ~restricted;
   6960			break;
   6961		default:
   6962			break;
   6963		}
   6964	}
   6965
   6966	/* GSO segmentation builds skbs with
   6967	 *	a (small) linear part for the headers, and
   6968	 *	page frags for the data.
   6969	 * Compared to a linear skb, the header-only part consumes an
   6970	 * additional buffer element. This reduces buffer utilization, and
   6971	 * hurts throughput. So compress small segments into one element.
   6972	 */
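       		/* E.g. on 4 KiB pages, a standard TCP segment (headroom plus
       		 * roughly 66 bytes of headers plus a 1448-byte MSS) fits into
       		 * an order-0 head allocation, so SG is dropped and each
       		 * segment then occupies a single buffer element.
       		 */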
   6973	if (netif_needs_gso(skb, features)) {
   6974		/* match skb_segment(): */
   6975		unsigned int doffset = skb->data - skb_mac_header(skb);
   6976		unsigned int hsize = skb_shinfo(skb)->gso_size;
   6977		unsigned int hroom = skb_headroom(skb);
   6978
   6979		/* linearize only if resulting skb allocations are order-0: */
   6980		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
   6981			features &= ~NETIF_F_SG;
   6982	}
   6983
   6984	return vlan_features_check(skb, features);
   6985}
   6986EXPORT_SYMBOL_GPL(qeth_features_check);
   6987
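       	/* Fill the netdev stats: RX counters are kept card-wide, while TX
       	 * counters are maintained per outbound queue and summed up here.
       	 */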
   6988void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
   6989{
   6990	struct qeth_card *card = dev->ml_priv;
   6991	struct qeth_qdio_out_q *queue;
   6992	unsigned int i;
   6993
   6994	QETH_CARD_TEXT(card, 5, "getstat");
   6995
   6996	stats->rx_packets = card->stats.rx_packets;
   6997	stats->rx_bytes = card->stats.rx_bytes;
   6998	stats->rx_errors = card->stats.rx_length_errors +
   6999			   card->stats.rx_frame_errors +
   7000			   card->stats.rx_fifo_errors;
   7001	stats->rx_dropped = card->stats.rx_dropped_nomem +
   7002			    card->stats.rx_dropped_notsupp +
   7003			    card->stats.rx_dropped_runt;
   7004	stats->multicast = card->stats.rx_multicast;
   7005	stats->rx_length_errors = card->stats.rx_length_errors;
   7006	stats->rx_frame_errors = card->stats.rx_frame_errors;
   7007	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
   7008
   7009	for (i = 0; i < card->qdio.no_out_queues; i++) {
   7010		queue = card->qdio.out_qs[i];
   7011
   7012		stats->tx_packets += queue->stats.tx_packets;
   7013		stats->tx_bytes += queue->stats.tx_bytes;
   7014		stats->tx_errors += queue->stats.tx_errors;
   7015		stats->tx_dropped += queue->stats.tx_dropped;
   7016	}
   7017}
   7018EXPORT_SYMBOL_GPL(qeth_get_stats64);
   7019
   7020#define TC_IQD_UCAST   0
   7021static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
   7022				     unsigned int ucast_txqs)
   7023{
   7024	unsigned int prio;
   7025
   7026	/* IQD requires mcast traffic to be placed on a dedicated queue, and
   7027	 * qeth_iqd_select_queue() deals with this.
   7028	 * For unicast traffic, we defer the queue selection to the stack.
   7029	 * By installing a trivial prio map that spans over only the unicast
   7030	 * queues, we can encourage the stack to spread the ucast traffic evenly
   7031	 * without selecting the mcast queue.
   7032	 */
   7033
   7034	/* One traffic class, spanning over all active ucast queues: */
   7035	netdev_set_num_tc(dev, 1);
   7036	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
   7037			    QETH_IQD_MIN_UCAST_TXQ);
   7038
   7039	/* Map all priorities to this traffic class: */
   7040	for (prio = 0; prio <= TC_BITMASK; prio++)
   7041		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
   7042}
   7043
   7044int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
   7045{
   7046	struct net_device *dev = card->dev;
   7047	int rc;
   7048
   7049	/* Per netif_setup_tc(), adjust the mapping first: */
   7050	if (IS_IQD(card))
   7051		qeth_iqd_set_prio_tc_map(dev, count - 1);
   7052
   7053	rc = netif_set_real_num_tx_queues(dev, count);
   7054
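       		/* on failure, re-install the prio-tc map for the old queue count: */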
   7055	if (rc && IS_IQD(card))
   7056		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
   7057
   7058	return rc;
   7059}
   7060EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
   7061
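       	/* IQD TX queue selection: mcast gets its dedicated queue; for
       	 * ucast the pick is deferred to the stack, remapping a pick of the
       	 * mcast queue onto the first ucast queue.
       	 */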
   7062u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
   7063			  u8 cast_type, struct net_device *sb_dev)
   7064{
   7065	u16 txq;
   7066
   7067	if (cast_type != RTN_UNICAST)
   7068		return QETH_IQD_MCAST_TXQ;
   7069	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
   7070		return QETH_IQD_MIN_UCAST_TXQ;
   7071
   7072	txq = netdev_pick_tx(dev, skb, sb_dev);
   7073	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
   7074}
   7075EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
   7076
   7077u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
   7078			  struct net_device *sb_dev)
   7079{
   7080	struct qeth_card *card = dev->ml_priv;
   7081
   7082	if (qeth_uses_tx_prio_queueing(card))
   7083		return qeth_get_priority_queue(card, skb);
   7084
   7085	return netdev_pick_tx(dev, skb, sb_dev);
   7086}
   7087EXPORT_SYMBOL_GPL(qeth_osa_select_queue);
   7088
   7089int qeth_open(struct net_device *dev)
   7090{
   7091	struct qeth_card *card = dev->ml_priv;
   7092	struct qeth_qdio_out_q *queue;
   7093	unsigned int i;
   7094
   7095	QETH_CARD_TEXT(card, 4, "qethopen");
   7096
   7097	card->data.state = CH_STATE_UP;
   7098	netif_tx_start_all_queues(dev);
   7099
   7100	local_bh_disable();
   7101	qeth_for_each_output_queue(card, queue, i) {
   7102		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
   7103		napi_enable(&queue->napi);
   7104		napi_schedule(&queue->napi);
   7105	}
   7106
   7107	napi_enable(&card->napi);
   7108	napi_schedule(&card->napi);
   7109	/* kick-start the NAPI softirq: */
   7110	local_bh_enable();
   7111
   7112	return 0;
   7113}
   7114EXPORT_SYMBOL_GPL(qeth_open);
   7115
   7116int qeth_stop(struct net_device *dev)
   7117{
   7118	struct qeth_card *card = dev->ml_priv;
   7119	struct qeth_qdio_out_q *queue;
   7120	unsigned int i;
   7121
   7122	QETH_CARD_TEXT(card, 4, "qethstop");
   7123
   7124	napi_disable(&card->napi);
   7125	cancel_delayed_work_sync(&card->buffer_reclaim_work);
   7126	qdio_stop_irq(CARD_DDEV(card));
   7127
   7128	/* Quiesce the NAPI instances: */
   7129	qeth_for_each_output_queue(card, queue, i)
   7130		napi_disable(&queue->napi);
   7131
   7132	/* Stop .ndo_start_xmit, might still access queue->napi. */
   7133	netif_tx_disable(dev);
   7134
   7135	qeth_for_each_output_queue(card, queue, i) {
   7136		del_timer_sync(&queue->timer);
   7137		/* Queues may get re-allocated, so remove the NAPIs. */
   7138		netif_napi_del(&queue->napi);
   7139	}
   7140
   7141	return 0;
   7142}
   7143EXPORT_SYMBOL_GPL(qeth_stop);
   7144
   7145static int __init qeth_core_init(void)
   7146{
   7147	int rc;
   7148
   7149	pr_info("loading core functions\n");
   7150
   7151	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
   7152
   7153	rc = qeth_register_dbf_views();
   7154	if (rc)
   7155		goto dbf_err;
   7156	qeth_core_root_dev = root_device_register("qeth");
   7157	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
   7158	if (rc)
   7159		goto register_err;
   7160	qeth_core_header_cache =
   7161		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
   7162				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
   7163				  0, NULL);
   7164	if (!qeth_core_header_cache) {
   7165		rc = -ENOMEM;
   7166		goto slab_err;
   7167	}
   7168	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
   7169			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
   7170	if (!qeth_qdio_outbuf_cache) {
   7171		rc = -ENOMEM;
   7172		goto cqslab_err;
   7173	}
   7174
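       		/* Align the objects to their size so that each QAOB is
       		 * naturally aligned, as the QDIO hardware presumably expects
       		 * for the absolute addresses it is handed.
       		 */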
   7175	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
   7176					    sizeof(struct qaob),
   7177					    sizeof(struct qaob),
   7178					    0, NULL);
   7179	if (!qeth_qaob_cache) {
   7180		rc = -ENOMEM;
   7181		goto qaob_err;
   7182	}
   7183
   7184	rc = ccw_driver_register(&qeth_ccw_driver);
   7185	if (rc)
   7186		goto ccw_err;
   7187	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
   7188	if (rc)
   7189		goto ccwgroup_err;
   7190
   7191	return 0;
   7192
   7193ccwgroup_err:
   7194	ccw_driver_unregister(&qeth_ccw_driver);
   7195ccw_err:
   7196	kmem_cache_destroy(qeth_qaob_cache);
   7197qaob_err:
   7198	kmem_cache_destroy(qeth_qdio_outbuf_cache);
   7199cqslab_err:
   7200	kmem_cache_destroy(qeth_core_header_cache);
   7201slab_err:
   7202	root_device_unregister(qeth_core_root_dev);
   7203register_err:
   7204	qeth_unregister_dbf_views();
   7205dbf_err:
   7206	debugfs_remove_recursive(qeth_debugfs_root);
   7207	pr_err("Initializing the qeth device driver failed\n");
   7208	return rc;
   7209}
   7210
   7211static void __exit qeth_core_exit(void)
   7212{
   7213	qeth_clear_dbf_list();
   7214	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
   7215	ccw_driver_unregister(&qeth_ccw_driver);
   7216	kmem_cache_destroy(qeth_qaob_cache);
   7217	kmem_cache_destroy(qeth_qdio_outbuf_cache);
   7218	kmem_cache_destroy(qeth_core_header_cache);
   7219	root_device_unregister(qeth_core_root_dev);
   7220	qeth_unregister_dbf_views();
   7221	debugfs_remove_recursive(qeth_debugfs_root);
   7222	pr_info("core functions removed\n");
   7223}
   7224
   7225module_init(qeth_core_init);
   7226module_exit(qeth_core_exit);
   7227MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
   7228MODULE_DESCRIPTION("qeth core functions");
   7229MODULE_LICENSE("GPL");