cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sdio.c (68312B)


      1// SPDX-License-Identifier: ISC
      2/*
      3 * Copyright (c) 2004-2011 Atheros Communications Inc.
      4 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
      5 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
      6 */
      7
      8#include <linux/module.h>
      9#include <linux/mmc/card.h>
     10#include <linux/mmc/mmc.h>
     11#include <linux/mmc/host.h>
     12#include <linux/mmc/sdio_func.h>
     13#include <linux/mmc/sdio_ids.h>
     14#include <linux/mmc/sdio.h>
     15#include <linux/mmc/sd.h>
     16#include <linux/bitfield.h>
     17#include "core.h"
     18#include "bmi.h"
     19#include "debug.h"
     20#include "hif.h"
     21#include "htc.h"
     22#include "mac.h"
     23#include "targaddrs.h"
     24#include "trace.h"
     25#include "sdio.h"
     26#include "coredump.h"
     27
     28void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
     29
     30#define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)
     31
     32/* inlined helper functions */
     33
     34static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
     35						   size_t len)
     36{
     37	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
     38}
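/* Worked example (illustrative; assumes the 256-byte
 * ATH10K_HIF_MBOX_BLOCK_SIZE configured in ath10k_sdio_set_mbox_info(),
 * i.e. block_mask = 0xff): __ALIGN_MASK(300, 0xff) = (300 + 255) & ~255
 * = 512, so a 300-byte message is padded out to two full SDIO blocks.
 */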
     39
     40static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
     41{
     42	return (enum ath10k_htc_ep_id)pipe_id;
     43}
     44
     45static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
     46{
     47	dev_kfree_skb(pkt->skb);
     48	pkt->skb = NULL;
     49	pkt->alloc_len = 0;
     50	pkt->act_len = 0;
     51	pkt->trailer_only = false;
     52}
     53
     54static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
     55						size_t act_len, size_t full_len,
     56						bool part_of_bundle,
     57						bool last_in_bundle)
     58{
     59	pkt->skb = dev_alloc_skb(full_len);
     60	if (!pkt->skb)
     61		return -ENOMEM;
     62
     63	pkt->act_len = act_len;
     64	pkt->alloc_len = full_len;
     65	pkt->part_of_bundle = part_of_bundle;
     66	pkt->last_in_bundle = last_in_bundle;
     67	pkt->trailer_only = false;
     68
     69	return 0;
     70}
     71
     72static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
     73{
     74	bool trailer_only = false;
     75	struct ath10k_htc_hdr *htc_hdr =
     76		(struct ath10k_htc_hdr *)pkt->skb->data;
     77	u16 len = __le16_to_cpu(htc_hdr->len);
     78
     79	if (len == htc_hdr->trailer_len)
     80		trailer_only = true;
     81
     82	return trailer_only;
     83}
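/* In other words: if the HTC payload consists entirely of the trailer
 * (len == trailer_len), the message carries only metadata such as
 * lookahead and credit reports. Such skbs are parsed for their trailer
 * and then freed instead of being delivered to an endpoint (see
 * ath10k_sdio_mbox_rx_process_packets()).
 */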
     84
     85/* sdio/mmc functions */
     86
     87static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
     88					     unsigned int address,
     89					     unsigned char val)
     90{
     91	*arg = FIELD_PREP(BIT(31), write) |
     92	       FIELD_PREP(BIT(27), raw) |
     93	       FIELD_PREP(BIT(26), 1) |
     94	       FIELD_PREP(GENMASK(25, 9), address) |
     95	       FIELD_PREP(BIT(8), 1) |
     96	       FIELD_PREP(GENMASK(7, 0), val);
     97}
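/* The argument assembled above follows the SD spec CMD52 layout:
 * bit 31 is the R/W flag, bits 30:28 select the card function (left at
 * 0 here, i.e. function 0), bit 27 is the RAW (read-after-write) flag,
 * bits 25:9 carry the register address and bits 7:0 the data byte.
 * Bits 26 and 8 are written as 1 by this helper.
 */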
     98
     99static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
    100					   unsigned int address,
    101					   unsigned char byte)
    102{
    103	struct mmc_command io_cmd;
    104
    105	memset(&io_cmd, 0, sizeof(io_cmd));
    106	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
    107	io_cmd.opcode = SD_IO_RW_DIRECT;
    108	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
    109
    110	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
    111}
    112
    113static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
    114					   unsigned int address,
    115					   unsigned char *byte)
    116{
    117	struct mmc_command io_cmd;
    118	int ret;
    119
    120	memset(&io_cmd, 0, sizeof(io_cmd));
    121	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
    122	io_cmd.opcode = SD_IO_RW_DIRECT;
    123	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
    124
    125	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
    126	if (!ret)
    127		*byte = io_cmd.resp[0];
    128
    129	return ret;
    130}
    131
    132static int ath10k_sdio_config(struct ath10k *ar)
    133{
    134	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    135	struct sdio_func *func = ar_sdio->func;
    136	unsigned char byte, asyncintdelay = 2;
    137	int ret;
    138
    139	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
    140
    141	sdio_claim_host(func);
    142
    143	byte = 0;
    144	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
    145					      SDIO_CCCR_DRIVE_STRENGTH,
    146					      &byte);
    147
    148	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
    149	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
    150			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
    151
    152	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
    153					      SDIO_CCCR_DRIVE_STRENGTH,
    154					      byte);
    155
    156	byte = 0;
    157	ret = ath10k_sdio_func0_cmd52_rd_byte(
    158		func->card,
    159		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
    160		&byte);
    161
    162	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
    163		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
    164		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
    165
    166	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
    167					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
    168					      byte);
    169	if (ret) {
    170		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
    171		goto out;
    172	}
    173
    174	byte = 0;
    175	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
    176					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
    177					      &byte);
    178
    179	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
    180
    181	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
    182					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
    183					      byte);
    184	if (ret) {
    185		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
    186			    ret);
    187		goto out;
    188	}
    189
    190	byte = 0;
    191	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
    192					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
    193					      &byte);
    194
    195	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
    196	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
    197
    198	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
    199					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
    200					      byte);
    201
    202	/* give us some time to enable, in ms */
    203	func->enable_timeout = 100;
    204
    205	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
    206	if (ret) {
    207		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
    208			    ar_sdio->mbox_info.block_size, ret);
    209		goto out;
    210	}
    211
    212out:
    213	sdio_release_host(func);
    214	return ret;
    215}
    216
    217static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
    218{
    219	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    220	struct sdio_func *func = ar_sdio->func;
    221	int ret;
    222
    223	sdio_claim_host(func);
    224
    225	sdio_writel(func, val, addr, &ret);
    226	if (ret) {
    227		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
    228			    val, addr, ret);
    229		goto out;
    230	}
    231
    232	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
    233		   addr, val);
    234
    235out:
    236	sdio_release_host(func);
    237
    238	return ret;
    239}
    240
    241static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
    242{
    243	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    244	struct sdio_func *func = ar_sdio->func;
    245	__le32 *buf;
    246	int ret;
    247
    248	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    249	if (!buf)
    250		return -ENOMEM;
    251
    252	*buf = cpu_to_le32(val);
    253
    254	sdio_claim_host(func);
    255
    256	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
    257	if (ret) {
    258		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
    259			    val, addr, ret);
    260		goto out;
    261	}
    262
    263	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
    264		   addr, val);
    265
    266out:
    267	sdio_release_host(func);
    268
    269	kfree(buf);
    270
    271	return ret;
    272}
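/* Note: the 4-byte value is staged in a kmalloc'd buffer rather than on
 * the stack because sdio_writesb() may DMA directly from the buffer, and
 * stack memory is not guaranteed to be DMA-safe.
 */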
    273
    274static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
    275{
    276	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    277	struct sdio_func *func = ar_sdio->func;
    278	int ret;
    279
    280	sdio_claim_host(func);
    281	*val = sdio_readl(func, addr, &ret);
    282	if (ret) {
    283		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
    284			    addr, ret);
    285		goto out;
    286	}
    287
    288	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
    289		   addr, *val);
    290
    291out:
    292	sdio_release_host(func);
    293
    294	return ret;
    295}
    296
    297static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
    298{
    299	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    300	struct sdio_func *func = ar_sdio->func;
    301	int ret;
    302
    303	sdio_claim_host(func);
    304
    305	ret = sdio_memcpy_fromio(func, buf, addr, len);
    306	if (ret) {
    307		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
    308			    addr, ret);
    309		goto out;
    310	}
    311
    312	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
    313		   addr, buf, len);
    314	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
    315
    316out:
    317	sdio_release_host(func);
    318
    319	return ret;
    320}
    321
    322static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
    323{
    324	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    325	struct sdio_func *func = ar_sdio->func;
    326	int ret;
    327
    328	sdio_claim_host(func);
    329
    330	/* For some reason toio() doesn't have const for the buffer, need
     331	 * an ugly hack to work around that.
    332	 */
    333	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
    334	if (ret) {
    335		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
    336			    addr, ret);
    337		goto out;
    338	}
    339
    340	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
    341		   addr, buf, len);
    342	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
    343
    344out:
    345	sdio_release_host(func);
    346
    347	return ret;
    348}
    349
    350static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
    351{
    352	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    353	struct sdio_func *func = ar_sdio->func;
    354	int ret;
    355
    356	sdio_claim_host(func);
    357
    358	len = round_down(len, ar_sdio->mbox_info.block_size);
    359
    360	ret = sdio_readsb(func, buf, addr, len);
    361	if (ret) {
    362		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
    363			    addr, ret);
    364		goto out;
    365	}
    366
    367	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
    368		   addr, buf, len);
    369	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
    370
    371out:
    372	sdio_release_host(func);
    373
    374	return ret;
    375}
    376
    377/* HIF mbox functions */
    378
    379static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
    380					      struct ath10k_sdio_rx_data *pkt,
    381					      u32 *lookaheads,
    382					      int *n_lookaheads)
    383{
    384	struct ath10k_htc *htc = &ar->htc;
    385	struct sk_buff *skb = pkt->skb;
    386	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
    387	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
    388	enum ath10k_htc_ep_id eid;
    389	u8 *trailer;
    390	int ret;
    391
    392	if (trailer_present) {
    393		trailer = skb->data + skb->len - htc_hdr->trailer_len;
    394
    395		eid = pipe_id_to_eid(htc_hdr->eid);
    396
    397		ret = ath10k_htc_process_trailer(htc,
    398						 trailer,
    399						 htc_hdr->trailer_len,
    400						 eid,
    401						 lookaheads,
    402						 n_lookaheads);
    403		if (ret)
    404			return ret;
    405
    406		if (is_trailer_only_msg(pkt))
    407			pkt->trailer_only = true;
    408
    409		skb_trim(skb, skb->len - htc_hdr->trailer_len);
    410	}
    411
    412	skb_pull(skb, sizeof(*htc_hdr));
    413
    414	return 0;
    415}
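/* Buffer layout handled above (illustrative):
 *
 *   | ath10k_htc_hdr | payload ... | trailer (htc_hdr->trailer_len) |
 *
 * The trailer at the tail carries lookahead/credit reports; it is parsed
 * via ath10k_htc_process_trailer() and trimmed off, after which the HTC
 * header itself is pulled so only the payload remains in the skb.
 */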
    416
    417static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
    418					       u32 lookaheads[],
    419					       int *n_lookahead)
    420{
    421	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    422	struct ath10k_htc *htc = &ar->htc;
    423	struct ath10k_sdio_rx_data *pkt;
    424	struct ath10k_htc_ep *ep;
    425	struct ath10k_skb_rxcb *cb;
    426	enum ath10k_htc_ep_id id;
    427	int ret, i, *n_lookahead_local;
    428	u32 *lookaheads_local;
    429	int lookahead_idx = 0;
    430
    431	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
    432		lookaheads_local = lookaheads;
    433		n_lookahead_local = n_lookahead;
    434
    435		id = ((struct ath10k_htc_hdr *)
    436		      &lookaheads[lookahead_idx++])->eid;
    437
    438		if (id >= ATH10K_HTC_EP_COUNT) {
    439			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
    440				    id);
    441			ret = -ENOMEM;
    442			goto out;
    443		}
    444
    445		ep = &htc->endpoint[id];
    446
    447		if (ep->service_id == 0) {
    448			ath10k_warn(ar, "ep %d is not connected\n", id);
    449			ret = -ENOMEM;
    450			goto out;
    451		}
    452
    453		pkt = &ar_sdio->rx_pkts[i];
    454
    455		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
     456		/* Only read lookaheads from RX trailers
    457			 * for the last packet in a bundle.
    458			 */
    459			lookahead_idx--;
    460			lookaheads_local = NULL;
    461			n_lookahead_local = NULL;
    462		}
    463
    464		ret = ath10k_sdio_mbox_rx_process_packet(ar,
    465							 pkt,
    466							 lookaheads_local,
    467							 n_lookahead_local);
    468		if (ret)
    469			goto out;
    470
    471		if (!pkt->trailer_only) {
    472			cb = ATH10K_SKB_RXCB(pkt->skb);
    473			cb->eid = id;
    474
    475			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
    476			queue_work(ar->workqueue_aux,
    477				   &ar_sdio->async_work_rx);
    478		} else {
    479			kfree_skb(pkt->skb);
    480		}
    481
    482		/* The RX complete handler now owns the skb...*/
    483		pkt->skb = NULL;
    484		pkt->alloc_len = 0;
    485	}
    486
    487	ret = 0;
    488
    489out:
     490	/* Free all packets that were not passed on to the RX completion
    491	 * handler...
    492	 */
    493	for (; i < ar_sdio->n_rx_pkts; i++)
    494		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
    495
    496	return ret;
    497}
    498
    499static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
    500					 struct ath10k_sdio_rx_data *rx_pkts,
    501					 struct ath10k_htc_hdr *htc_hdr,
    502					 size_t full_len, size_t act_len,
    503					 size_t *bndl_cnt)
    504{
    505	int ret, i;
    506	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
    507
    508	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
    509
    510	if (*bndl_cnt > max_msgs) {
    511		ath10k_warn(ar,
    512			    "HTC bundle length %u exceeds maximum %u\n",
    513			    le16_to_cpu(htc_hdr->len),
    514			    max_msgs);
    515		return -ENOMEM;
    516	}
    517
     518	/* Allocate bndl_cnt extra skbs for the bundle.
     519	 * The packet containing the
     520	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
     521	 * in bndl_cnt. The skb for that packet will be
     522	 * allocated separately.
     523	 */
    524	for (i = 0; i < *bndl_cnt; i++) {
    525		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
    526						    act_len,
    527						    full_len,
    528						    true,
    529						    false);
    530		if (ret)
    531			return ret;
    532	}
    533
    534	return 0;
    535}
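/* Example (illustrative): if ath10k_htc_get_bundle_count() decodes a
 * bundle count of 3 from htc_hdr->flags, three extra skbs are allocated
 * here, and the skb for the flagged packet itself is allocated afterwards
 * by the caller, giving four buffers for the bundle in total.
 */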
    536
    537static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
    538				     u32 lookaheads[], int n_lookaheads)
    539{
    540	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    541	struct ath10k_htc_hdr *htc_hdr;
    542	size_t full_len, act_len;
    543	bool last_in_bundle;
    544	int ret, i;
    545	int pkt_cnt = 0;
    546
    547	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
    548		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
    549			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
    550		ret = -ENOMEM;
    551		goto err;
    552	}
    553
    554	for (i = 0; i < n_lookaheads; i++) {
    555		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
    556		last_in_bundle = false;
    557
    558		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
    559			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
    560				    le16_to_cpu(htc_hdr->len),
    561				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
    562			ret = -ENOMEM;
    563
    564			ath10k_core_start_recovery(ar);
    565			ath10k_warn(ar, "exceeds length, start recovery\n");
    566
    567			goto err;
    568		}
    569
    570		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
    571		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
    572
    573		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
    574			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
    575				    htc_hdr->eid, htc_hdr->flags,
    576				    le16_to_cpu(htc_hdr->len));
    577			ret = -EINVAL;
    578			goto err;
    579		}
    580
    581		if (ath10k_htc_get_bundle_count(
    582			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
    583			/* HTC header indicates that every packet to follow
    584			 * has the same padded length so that it can be
    585			 * optimally fetched as a full bundle.
    586			 */
    587			size_t bndl_cnt;
    588
    589			ret = ath10k_sdio_mbox_alloc_bundle(ar,
    590							    &ar_sdio->rx_pkts[pkt_cnt],
    591							    htc_hdr,
    592							    full_len,
    593							    act_len,
    594							    &bndl_cnt);
    595
    596			if (ret) {
    597				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
    598					    ret);
    599				goto err;
    600			}
    601
    602			pkt_cnt += bndl_cnt;
    603
    604			/* next buffer will be the last in the bundle */
    605			last_in_bundle = true;
    606		}
    607
    608		/* Allocate skb for packet. If the packet had the
    609		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
    610		 * packet skb's have been allocated in the previous step.
    611		 */
    612		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
    613			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
    614
    615		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
    616						    act_len,
    617						    full_len,
    618						    last_in_bundle,
    619						    last_in_bundle);
    620		if (ret) {
    621			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
    622			goto err;
    623		}
    624
    625		pkt_cnt++;
    626	}
    627
    628	ar_sdio->n_rx_pkts = pkt_cnt;
    629
    630	return 0;
    631
    632err:
    633	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
    634		if (!ar_sdio->rx_pkts[i].alloc_len)
    635			break;
    636		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
    637	}
    638
    639	return ret;
    640}
    641
    642static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
    643{
    644	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    645	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
    646	struct sk_buff *skb = pkt->skb;
    647	struct ath10k_htc_hdr *htc_hdr;
    648	int ret;
    649
    650	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
    651				 skb->data, pkt->alloc_len);
    652	if (ret)
    653		goto err;
    654
    655	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
    656	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
    657
    658	if (pkt->act_len > pkt->alloc_len) {
    659		ret = -EINVAL;
    660		goto err;
    661	}
    662
    663	skb_put(skb, pkt->act_len);
    664	return 0;
    665
    666err:
    667	ar_sdio->n_rx_pkts = 0;
    668	ath10k_sdio_mbox_free_rx_pkt(pkt);
    669
    670	return ret;
    671}
    672
    673static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
    674{
    675	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    676	struct ath10k_sdio_rx_data *pkt;
    677	struct ath10k_htc_hdr *htc_hdr;
    678	int ret, i;
    679	u32 pkt_offset, virt_pkt_len;
    680
    681	virt_pkt_len = 0;
    682	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
    683		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
    684
    685	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
    686		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
    687		ret = -E2BIG;
    688		goto err;
    689	}
    690
    691	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
    692				 ar_sdio->vsg_buffer, virt_pkt_len);
    693	if (ret) {
    694		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
    695		goto err;
    696	}
    697
    698	pkt_offset = 0;
    699	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
    700		pkt = &ar_sdio->rx_pkts[i];
    701		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
    702		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
    703
    704		if (pkt->act_len > pkt->alloc_len) {
    705			ret = -EINVAL;
    706			goto err;
    707		}
    708
    709		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
    710		pkt_offset += pkt->alloc_len;
    711	}
    712
    713	return 0;
    714
    715err:
     716	/* Free all packets that were not successfully fetched. */
    717	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
    718		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
    719
    720	ar_sdio->n_rx_pkts = 0;
    721
    722	return ret;
    723}
    724
     725/* This is the timeout for mailbox processing done in the sdio irq
     726 * handler. The timeout is deliberately set quite high since SDIO
     727 * dump logs over the serial port can add substantial overhead to
     728 * the processing (if enabled).
     729 */
    730#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
    731
    732static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
    733						  u32 msg_lookahead, bool *done)
    734{
    735	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    736	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
    737	int n_lookaheads = 1;
    738	unsigned long timeout;
    739	int ret;
    740
    741	*done = true;
    742
    743	/* Copy the lookahead obtained from the HTC register table into our
    744	 * temp array as a start value.
    745	 */
    746	lookaheads[0] = msg_lookahead;
    747
    748	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
    749	do {
     750		/* Try to allocate as many HTC RX packets as indicated by
     751		 * n_lookaheads.
     752		 */
    753		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
    754						n_lookaheads);
    755		if (ret)
    756			break;
    757
    758		if (ar_sdio->n_rx_pkts >= 2)
    759			/* A recv bundle was detected, force IRQ status
    760			 * re-check again.
    761			 */
    762			*done = false;
    763
    764		if (ar_sdio->n_rx_pkts > 1)
    765			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
    766		else
    767			ret = ath10k_sdio_mbox_rx_fetch(ar);
    768
    769		/* Process fetched packets. This will potentially update
    770		 * n_lookaheads depending on if the packets contain lookahead
    771		 * reports.
    772		 */
    773		n_lookaheads = 0;
    774		ret = ath10k_sdio_mbox_rx_process_packets(ar,
    775							  lookaheads,
    776							  &n_lookaheads);
    777
    778		if (!n_lookaheads || ret)
    779			break;
    780
     781		/* For SYNCH processing, if we get here, we are running
     782		 * through the loop again due to updated lookaheads. Set
     783		 * the flag to re-check IRQ status registers again before
     784		 * leaving IRQ processing; this can net better performance
     785		 * in high throughput situations.
     786		 */
    787		*done = false;
    788	} while (time_before(jiffies, timeout));
    789
    790	if (ret && (ret != -ECANCELED))
    791		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
    792			    ret);
    793
    794	return ret;
    795}
    796
    797static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
    798{
    799	u32 val;
    800	int ret;
    801
    802	/* TODO: Add firmware crash handling */
    803	ath10k_warn(ar, "firmware crashed\n");
    804
    805	/* read counter to clear the interrupt, the debug error interrupt is
    806	 * counter 0.
    807	 */
    808	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
    809	if (ret)
    810		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
    811
    812	return ret;
    813}
    814
    815static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
    816{
    817	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    818	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
    819	u8 counter_int_status;
    820	int ret;
    821
    822	mutex_lock(&irq_data->mtx);
    823	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
    824			     irq_data->irq_en_reg->cntr_int_status_en;
    825
     826	/* NOTE: other modules like GMBOX may use the counter interrupt
     827	 * for credit flow control on other counters; we only need to
     828	 * check for the debug assertion counter interrupt.
     829	 */
    830	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
    831		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
    832	else
    833		ret = 0;
    834
    835	mutex_unlock(&irq_data->mtx);
    836
    837	return ret;
    838}
    839
    840static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
    841{
    842	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    843	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
    844	u8 error_int_status;
    845	int ret;
    846
    847	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
    848
    849	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
    850	if (!error_int_status) {
    851		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
    852			    error_int_status);
    853		return -EIO;
    854	}
    855
    856	ath10k_dbg(ar, ATH10K_DBG_SDIO,
    857		   "sdio error_int_status 0x%x\n", error_int_status);
    858
    859	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
    860		      error_int_status))
    861		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
    862
    863	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
    864		      error_int_status))
    865		ath10k_warn(ar, "rx underflow interrupt error\n");
    866
    867	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
    868		      error_int_status))
    869		ath10k_warn(ar, "tx overflow interrupt error\n");
    870
    871	/* Clear the interrupt */
    872	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
    873
    874	/* set W1C value to clear the interrupt, this hits the register first */
    875	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
    876				    error_int_status);
    877	if (ret) {
    878		ath10k_warn(ar, "unable to write to error int status address: %d\n",
    879			    ret);
    880		return ret;
    881	}
    882
    883	return 0;
    884}
    885
    886static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
    887{
    888	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    889	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
    890	u8 cpu_int_status;
    891	int ret;
    892
    893	mutex_lock(&irq_data->mtx);
    894	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
    895			 irq_data->irq_en_reg->cpu_int_status_en;
    896	if (!cpu_int_status) {
    897		ath10k_warn(ar, "CPU interrupt status is zero\n");
    898		ret = -EIO;
    899		goto out;
    900	}
    901
    902	/* Clear the interrupt */
    903	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
    904
     905	/* Set up the register transfer buffer to hit the register 4 times;
     906	 * this is done to make the access 4-byte aligned to mitigate issues
     907	 * with host bus interconnects that restrict bus transfer lengths to
     908	 * be a multiple of 4 bytes.
     909	 *
     910	 * Set W1C value to clear the interrupt; this hits the register first.
     911	 */
    912	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
    913				    cpu_int_status);
    914	if (ret) {
    915		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
    916			    ret);
    917		goto out;
    918	}
    919
    920out:
    921	mutex_unlock(&irq_data->mtx);
    922	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
    923		ath10k_sdio_fw_crashed_dump(ar);
    924
    925	return ret;
    926}
    927
    928static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
    929					    u8 *host_int_status,
    930					    u32 *lookahead)
    931{
    932	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
    933	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
    934	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
    935	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
    936	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
    937	int ret;
    938
    939	mutex_lock(&irq_data->mtx);
    940
    941	*lookahead = 0;
    942	*host_int_status = 0;
    943
     944	/* int_status_en is supposed to be non-zero, otherwise interrupts
     945	 * shouldn't be enabled. There is however a short time frame during
     946	 * initialization between registering the irq handler and the
     947	 * int_status_en init where this can happen.
     948	 * We silently ignore this condition.
     949	 */
    950	if (!irq_en_reg->int_status_en) {
    951		ret = 0;
    952		goto out;
    953	}
    954
     955	/* Read the first sizeof(struct ath10k_sdio_irq_proc_regs)
     956	 * bytes of the HTC register table. This yields the values
     957	 * of the different int status registers and the lookahead
     958	 * registers.
     959	 */
    960	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
    961			       irq_proc_reg, sizeof(*irq_proc_reg));
    962	if (ret) {
    963		ath10k_core_start_recovery(ar);
    964		ath10k_warn(ar, "read int status fail, start recovery\n");
    965		goto out;
    966	}
    967
    968	/* Update only those registers that are enabled */
    969	*host_int_status = irq_proc_reg->host_int_status &
    970			   irq_en_reg->int_status_en;
    971
    972	/* Look at mbox status */
    973	if (!(*host_int_status & htc_mbox)) {
    974		*lookahead = 0;
    975		ret = 0;
    976		goto out;
    977	}
    978
     979	/* Mask out the pending mbox value; we use the lookahead as
     980	 * the real flag for mbox processing.
     981	 */
    982	*host_int_status &= ~htc_mbox;
    983	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
    984		*lookahead = le32_to_cpu(
    985			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
    986		if (!*lookahead)
    987			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
    988	}
    989
    990out:
    991	mutex_unlock(&irq_data->mtx);
    992	return ret;
    993}
    994
    995static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
    996					      bool *done)
    997{
    998	u8 host_int_status;
    999	u32 lookahead;
   1000	int ret;
   1001
   1002	/* NOTE: HIF implementation guarantees that the context of this
   1003	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
   1004	 * sleep or call any API that can block or switch thread/task
   1005	 * contexts. This is a fully schedulable context.
   1006	 */
   1007
   1008	ret = ath10k_sdio_mbox_read_int_status(ar,
   1009					       &host_int_status,
   1010					       &lookahead);
   1011	if (ret) {
   1012		*done = true;
   1013		goto out;
   1014	}
   1015
   1016	if (!host_int_status && !lookahead) {
   1017		ret = 0;
   1018		*done = true;
   1019		goto out;
   1020	}
   1021
   1022	if (lookahead) {
   1023		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   1024			   "sdio pending mailbox msg lookahead 0x%08x\n",
   1025			   lookahead);
   1026
   1027		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
   1028							     lookahead,
   1029							     done);
   1030		if (ret)
   1031			goto out;
   1032	}
   1033
   1034	/* now, handle the rest of the interrupts */
   1035	ath10k_dbg(ar, ATH10K_DBG_SDIO,
   1036		   "sdio host_int_status 0x%x\n", host_int_status);
   1037
   1038	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
   1039		/* CPU Interrupt */
   1040		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
   1041		if (ret)
   1042			goto out;
   1043	}
   1044
   1045	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
   1046		/* Error Interrupt */
   1047		ret = ath10k_sdio_mbox_proc_err_intr(ar);
   1048		if (ret)
   1049			goto out;
   1050	}
   1051
   1052	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
   1053		/* Counter Interrupt */
   1054		ret = ath10k_sdio_mbox_proc_counter_intr(ar);
   1055
   1056	ret = 0;
   1057
   1058out:
    1059	/* An optimization to bypass reading the IRQ status registers
    1060	 * unnecessarily, which can re-wake the target: if upper layers
    1061	 * determine that we are in a low-throughput mode, we can rely on
    1062	 * taking another interrupt rather than re-checking the status
    1063	 * registers which can re-wake the target.
    1064	 *
    1065	 * NOTE: host interfaces that rely on detecting pending mbox
    1066	 * messages at the HIF level cannot use this optimization due to
    1067	 * possible side effects; SPI requires the host to drain all
    1068	 * messages from the mailbox before exiting the ISR routine.
    1069	 */
   1070
   1071	ath10k_dbg(ar, ATH10K_DBG_SDIO,
   1072		   "sdio pending irqs done %d status %d",
   1073		   *done, ret);
   1074
   1075	return ret;
   1076}
   1077
   1078static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
   1079{
   1080	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1081	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
   1082	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
   1083
   1084	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
   1085	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
   1086	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
   1087	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
   1088	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
   1089
   1090	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
   1091
   1092	dev_id_base = (device & 0x0F00);
   1093	dev_id_chiprev = (device & 0x00FF);
   1094	switch (dev_id_base) {
   1095	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
   1096		if (dev_id_chiprev < 4)
   1097			mbox_info->ext_info[0].htc_ext_sz =
   1098				ATH10K_HIF_MBOX0_EXT_WIDTH;
   1099		else
   1100			/* from QCA6174 2.0(0x504), the width has been extended
   1101			 * to 56K
   1102			 */
   1103			mbox_info->ext_info[0].htc_ext_sz =
   1104				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
   1105		break;
   1106	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
   1107		mbox_info->ext_info[0].htc_ext_sz =
   1108			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
   1109		break;
   1110	default:
   1111		mbox_info->ext_info[0].htc_ext_sz =
   1112				ATH10K_HIF_MBOX0_EXT_WIDTH;
   1113	}
   1114
   1115	mbox_info->ext_info[1].htc_ext_addr =
   1116		mbox_info->ext_info[0].htc_ext_addr +
   1117		mbox_info->ext_info[0].htc_ext_sz +
   1118		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
   1119	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
   1120}
   1121
   1122/* BMI functions */
   1123
   1124static int ath10k_sdio_bmi_credits(struct ath10k *ar)
   1125{
   1126	u32 addr, cmd_credits;
   1127	unsigned long timeout;
   1128	int ret;
   1129
   1130	/* Read the counter register to get the command credits */
   1131	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
   1132	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
   1133	cmd_credits = 0;
   1134
   1135	while (time_before(jiffies, timeout) && !cmd_credits) {
   1136		/* Hit the credit counter with a 4-byte access, the first byte
   1137		 * read will hit the counter and cause a decrement, while the
    1138		 * remaining 3 bytes have no effect. The rationale behind this
   1139		 * is to make all HIF accesses 4-byte aligned.
   1140		 */
   1141		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
   1142		if (ret) {
   1143			ath10k_warn(ar,
   1144				    "unable to decrement the command credit count register: %d\n",
   1145				    ret);
   1146			return ret;
   1147		}
   1148
   1149		/* The counter is only 8 bits.
   1150		 * Ignore anything in the upper 3 bytes
   1151		 */
   1152		cmd_credits &= 0xFF;
   1153	}
   1154
   1155	if (!cmd_credits) {
   1156		ath10k_warn(ar, "bmi communication timeout\n");
   1157		return -ETIMEDOUT;
   1158	}
   1159
   1160	return 0;
   1161}
   1162
   1163static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
   1164{
   1165	unsigned long timeout;
   1166	u32 rx_word;
   1167	int ret;
   1168
   1169	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
   1170	rx_word = 0;
   1171
   1172	while ((time_before(jiffies, timeout)) && !rx_word) {
   1173		ret = ath10k_sdio_read32(ar,
   1174					 MBOX_HOST_INT_STATUS_ADDRESS,
   1175					 &rx_word);
   1176		if (ret) {
   1177			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
   1178			return ret;
   1179		}
   1180
    1181		/* all we really want is one bit */
   1182		rx_word &= 1;
   1183	}
   1184
   1185	if (!rx_word) {
   1186		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
   1187		return -EINVAL;
   1188	}
   1189
   1190	return ret;
   1191}
   1192
   1193static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
   1194					void *req, u32 req_len,
   1195					void *resp, u32 *resp_len)
   1196{
   1197	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1198	u32 addr;
   1199	int ret;
   1200
   1201	if (req) {
   1202		ret = ath10k_sdio_bmi_credits(ar);
   1203		if (ret)
   1204			return ret;
   1205
   1206		addr = ar_sdio->mbox_info.htc_addr;
   1207
   1208		memcpy(ar_sdio->bmi_buf, req, req_len);
   1209		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
   1210		if (ret) {
   1211			ath10k_warn(ar,
   1212				    "unable to send the bmi data to the device: %d\n",
   1213				    ret);
   1214			return ret;
   1215		}
   1216	}
   1217
   1218	if (!resp || !resp_len)
   1219		/* No response expected */
   1220		return 0;
   1221
   1222	/* During normal bootup, small reads may be required.
   1223	 * Rather than issue an HIF Read and then wait as the Target
   1224	 * adds successive bytes to the FIFO, we wait here until
   1225	 * we know that response data is available.
   1226	 *
   1227	 * This allows us to cleanly timeout on an unexpected
   1228	 * Target failure rather than risk problems at the HIF level.
   1229	 * In particular, this avoids SDIO timeouts and possibly garbage
   1230	 * data on some host controllers.  And on an interconnect
   1231	 * such as Compact Flash (as well as some SDIO masters) which
   1232	 * does not provide any indication on data timeout, it avoids
   1233	 * a potential hang or garbage response.
   1234	 *
   1235	 * Synchronization is more difficult for reads larger than the
   1236	 * size of the MBOX FIFO (128B), because the Target is unable
   1237	 * to push the 129th byte of data until AFTER the Host posts an
   1238	 * HIF Read and removes some FIFO data.  So for large reads the
   1239	 * Host proceeds to post an HIF Read BEFORE all the data is
   1240	 * actually available to read.  Fortunately, large BMI reads do
   1241	 * not occur in practice -- they're supported for debug/development.
   1242	 *
   1243	 * So Host/Target BMI synchronization is divided into these cases:
   1244	 *  CASE 1: length < 4
   1245	 *        Should not happen
   1246	 *
   1247	 *  CASE 2: 4 <= length <= 128
   1248	 *        Wait for first 4 bytes to be in FIFO
   1249	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
   1250	 *        a BMI command credit, which indicates that the ENTIRE
   1251	 *        response is available in the FIFO
   1252	 *
   1253	 *  CASE 3: length > 128
   1254	 *        Wait for the first 4 bytes to be in FIFO
   1255	 *
   1256	 * For most uses, a small timeout should be sufficient and we will
   1257	 * usually see a response quickly; but there may be some unusual
    1258	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
   1259	 * For now, we use an unbounded busy loop while waiting for
   1260	 * BMI_EXECUTE.
   1261	 *
   1262	 * If BMI_EXECUTE ever needs to support longer-latency execution,
   1263	 * especially in production, this code needs to be enhanced to sleep
   1264	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
   1265	 * a function of Host processor speed.
   1266	 */
   1267	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
   1268	if (ret)
   1269		return ret;
   1270
   1271	/* We always read from the start of the mbox address */
   1272	addr = ar_sdio->mbox_info.htc_addr;
   1273	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
   1274	if (ret) {
   1275		ath10k_warn(ar,
   1276			    "unable to read the bmi data from the device: %d\n",
   1277			    ret);
   1278		return ret;
   1279	}
   1280
   1281	memcpy(resp, ar_sdio->bmi_buf, *resp_len);
   1282
   1283	return 0;
   1284}
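/* Usage sketch (hypothetical; the real BMI command encodings live in
 * bmi.h):
 *
 *	__le32 cmd = __cpu_to_le32(BMI_DONE);
 *	int ret = ath10k_sdio_bmi_exchange_msg(ar, &cmd, sizeof(cmd),
 *					       NULL, NULL);
 *
 * Passing resp/resp_len as NULL skips the lookahead wait and the mbox
 * FIFO read, as appropriate for commands that have no response.
 */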
   1285
   1286/* sdio async handling functions */
   1287
   1288static struct ath10k_sdio_bus_request
   1289*ath10k_sdio_alloc_busreq(struct ath10k *ar)
   1290{
   1291	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1292	struct ath10k_sdio_bus_request *bus_req;
   1293
   1294	spin_lock_bh(&ar_sdio->lock);
   1295
   1296	if (list_empty(&ar_sdio->bus_req_freeq)) {
   1297		bus_req = NULL;
   1298		goto out;
   1299	}
   1300
   1301	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
   1302				   struct ath10k_sdio_bus_request, list);
   1303	list_del(&bus_req->list);
   1304
   1305out:
   1306	spin_unlock_bh(&ar_sdio->lock);
   1307	return bus_req;
   1308}
   1309
   1310static void ath10k_sdio_free_bus_req(struct ath10k *ar,
   1311				     struct ath10k_sdio_bus_request *bus_req)
   1312{
   1313	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1314
   1315	memset(bus_req, 0, sizeof(*bus_req));
   1316
   1317	spin_lock_bh(&ar_sdio->lock);
   1318	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
   1319	spin_unlock_bh(&ar_sdio->lock);
   1320}
   1321
   1322static void __ath10k_sdio_write_async(struct ath10k *ar,
   1323				      struct ath10k_sdio_bus_request *req)
   1324{
   1325	struct ath10k_htc_ep *ep;
   1326	struct sk_buff *skb;
   1327	int ret;
   1328
   1329	skb = req->skb;
   1330	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
   1331	if (ret)
   1332		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
   1333			    req->address, ret);
   1334
   1335	if (req->htc_msg) {
   1336		ep = &ar->htc.endpoint[req->eid];
   1337		ath10k_htc_notify_tx_completion(ep, skb);
   1338	} else if (req->comp) {
   1339		complete(req->comp);
   1340	}
   1341
   1342	ath10k_sdio_free_bus_req(ar, req);
   1343}
   1344
   1345/* To improve throughput use workqueue to deliver packets to HTC layer,
   1346 * this way SDIO bus is utilised much better.
   1347 */
   1348static void ath10k_rx_indication_async_work(struct work_struct *work)
   1349{
   1350	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
   1351						   async_work_rx);
   1352	struct ath10k *ar = ar_sdio->ar;
   1353	struct ath10k_htc_ep *ep;
   1354	struct ath10k_skb_rxcb *cb;
   1355	struct sk_buff *skb;
   1356
   1357	while (true) {
   1358		skb = skb_dequeue(&ar_sdio->rx_head);
   1359		if (!skb)
   1360			break;
   1361		cb = ATH10K_SKB_RXCB(skb);
   1362		ep = &ar->htc.endpoint[cb->eid];
   1363		ep->ep_ops.ep_rx_complete(ar, skb);
   1364	}
   1365
   1366	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
   1367		local_bh_disable();
   1368		napi_schedule(&ar->napi);
   1369		local_bh_enable();
   1370	}
   1371}
   1372
   1373static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
   1374{
   1375	struct ath10k *ar = ar_sdio->ar;
   1376	unsigned char rtc_state = 0;
   1377	int ret = 0;
   1378
   1379	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
   1380	if (ret) {
   1381		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
   1382		return ret;
   1383	}
   1384
   1385	*state = rtc_state & 0x3;
   1386
   1387	return ret;
   1388}
   1389
   1390static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
   1391{
   1392	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1393	u32 val;
   1394	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
   1395	unsigned char rtc_state = 0;
   1396
   1397	sdio_claim_host(ar_sdio->func);
   1398
   1399	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
   1400	if (ret) {
   1401		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
   1402			    ret);
   1403		goto release;
   1404	}
   1405
   1406	if (enable_sleep) {
   1407		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
   1408		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
   1409	} else {
   1410		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
   1411		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
   1412	}
   1413
   1414	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
   1415	if (ret) {
   1416		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
   1417			    ret);
   1418	}
   1419
   1420	if (!enable_sleep) {
   1421		do {
   1422			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
   1423			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
   1424
   1425			if (ret) {
   1426				ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
   1427				break;
   1428			}
   1429
   1430			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
   1431				   rtc_state);
   1432
   1433			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
   1434				break;
   1435
   1436			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
   1437			retry--;
   1438		} while (retry > 0);
   1439	}
   1440
   1441release:
   1442	sdio_release_host(ar_sdio->func);
   1443
   1444	return ret;
   1445}
   1446
   1447static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
   1448{
   1449	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
   1450
   1451	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
   1452	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
   1453}
   1454
   1455static void ath10k_sdio_write_async_work(struct work_struct *work)
   1456{
   1457	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
   1458						   wr_async_work);
   1459	struct ath10k *ar = ar_sdio->ar;
   1460	struct ath10k_sdio_bus_request *req, *tmp_req;
   1461	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
   1462
   1463	spin_lock_bh(&ar_sdio->wr_async_lock);
   1464
   1465	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
   1466		list_del(&req->list);
   1467		spin_unlock_bh(&ar_sdio->wr_async_lock);
   1468
   1469		if (req->address >= mbox_info->htc_addr &&
   1470		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
   1471			ath10k_sdio_set_mbox_sleep(ar, false);
   1472			mod_timer(&ar_sdio->sleep_timer, jiffies +
   1473				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
   1474		}
   1475
   1476		__ath10k_sdio_write_async(ar, req);
   1477		spin_lock_bh(&ar_sdio->wr_async_lock);
   1478	}
   1479
   1480	spin_unlock_bh(&ar_sdio->wr_async_lock);
   1481
   1482	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
   1483		ath10k_sdio_set_mbox_sleep(ar, true);
   1484}
   1485
   1486static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
   1487				      struct sk_buff *skb,
   1488				      struct completion *comp,
   1489				      bool htc_msg, enum ath10k_htc_ep_id eid)
   1490{
   1491	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1492	struct ath10k_sdio_bus_request *bus_req;
   1493
    1494	/* Allocate a bus request for the message and queue it on the
    1495	 * async write queue; the SDIO workqueue drains this queue later.
    1496	 */
   1497	bus_req = ath10k_sdio_alloc_busreq(ar);
   1498	if (!bus_req) {
   1499		ath10k_warn(ar,
   1500			    "unable to allocate bus request for async request\n");
   1501		return -ENOMEM;
   1502	}
   1503
   1504	bus_req->skb = skb;
   1505	bus_req->eid = eid;
   1506	bus_req->address = addr;
   1507	bus_req->htc_msg = htc_msg;
   1508	bus_req->comp = comp;
   1509
   1510	spin_lock_bh(&ar_sdio->wr_async_lock);
   1511	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
   1512	spin_unlock_bh(&ar_sdio->wr_async_lock);
   1513
   1514	return 0;
   1515}
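/* Queuing alone does not start the transfer: requests sit on wr_asyncq
 * until the caller kicks the workqueue, as the TX path does:
 *
 *	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
 */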
   1516
   1517/* IRQ handler */
   1518
   1519static void ath10k_sdio_irq_handler(struct sdio_func *func)
   1520{
   1521	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
   1522	struct ath10k *ar = ar_sdio->ar;
   1523	unsigned long timeout;
   1524	bool done = false;
   1525	int ret;
   1526
   1527	/* Release the host during interrupts so we can pick it back up when
   1528	 * we process commands.
   1529	 */
   1530	sdio_release_host(ar_sdio->func);
   1531
   1532	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
   1533	do {
   1534		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
   1535		if (ret)
   1536			break;
   1537	} while (time_before(jiffies, timeout) && !done);
   1538
   1539	ath10k_mac_tx_push_pending(ar);
   1540
   1541	sdio_claim_host(ar_sdio->func);
   1542
   1543	if (ret && ret != -ECANCELED)
   1544		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
   1545			    ret);
   1546}
   1547
   1548/* sdio HIF functions */
   1549
   1550static int ath10k_sdio_disable_intrs(struct ath10k *ar)
   1551{
   1552	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1553	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
   1554	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
   1555	int ret;
   1556
   1557	mutex_lock(&irq_data->mtx);
   1558
   1559	memset(regs, 0, sizeof(*regs));
   1560	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
   1561				&regs->int_status_en, sizeof(*regs));
   1562	if (ret)
   1563		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
   1564
   1565	mutex_unlock(&irq_data->mtx);
   1566
   1567	return ret;
   1568}
   1569
   1570static int ath10k_sdio_hif_power_up(struct ath10k *ar,
   1571				    enum ath10k_firmware_mode fw_mode)
   1572{
   1573	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1574	struct sdio_func *func = ar_sdio->func;
   1575	int ret;
   1576
   1577	if (!ar_sdio->is_disabled)
   1578		return 0;
   1579
   1580	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
   1581
   1582	ret = ath10k_sdio_config(ar);
   1583	if (ret) {
   1584		ath10k_err(ar, "failed to config sdio: %d\n", ret);
   1585		return ret;
   1586	}
   1587
   1588	sdio_claim_host(func);
   1589
   1590	ret = sdio_enable_func(func);
   1591	if (ret) {
    1592		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
   1593		sdio_release_host(func);
   1594		return ret;
   1595	}
   1596
   1597	sdio_release_host(func);
   1598
   1599	/* Wait for hardware to initialise. It should take a lot less than
   1600	 * 20 ms but let's be conservative here.
   1601	 */
   1602	msleep(20);
   1603
   1604	ar_sdio->is_disabled = false;
   1605
   1606	ret = ath10k_sdio_disable_intrs(ar);
   1607	if (ret)
   1608		return ret;
   1609
   1610	return 0;
   1611}
   1612
   1613static void ath10k_sdio_hif_power_down(struct ath10k *ar)
   1614{
   1615	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1616	int ret;
   1617
   1618	if (ar_sdio->is_disabled)
   1619		return;
   1620
   1621	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
   1622
   1623	del_timer_sync(&ar_sdio->sleep_timer);
   1624	ath10k_sdio_set_mbox_sleep(ar, true);
   1625
   1626	/* Disable the card */
   1627	sdio_claim_host(ar_sdio->func);
   1628
   1629	ret = sdio_disable_func(ar_sdio->func);
   1630	if (ret) {
   1631		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
   1632		sdio_release_host(ar_sdio->func);
   1633		return;
   1634	}
   1635
   1636	ret = mmc_hw_reset(ar_sdio->func->card);
   1637	if (ret)
   1638		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
   1639
   1640	sdio_release_host(ar_sdio->func);
   1641
   1642	ar_sdio->is_disabled = true;
   1643}
   1644
   1645static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
   1646				 struct ath10k_hif_sg_item *items, int n_items)
   1647{
   1648	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1649	enum ath10k_htc_ep_id eid;
   1650	struct sk_buff *skb;
   1651	int ret, i;
   1652
   1653	eid = pipe_id_to_eid(pipe_id);
   1654
   1655	for (i = 0; i < n_items; i++) {
   1656		size_t padded_len;
   1657		u32 address;
   1658
   1659		skb = items[i].transfer_context;
   1660		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
   1661							      skb->len);
   1662		skb_trim(skb, padded_len);
   1663
   1664		/* Write TX data to the end of the mbox address space */
   1665		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
   1666			  skb->len;
   1667		ret = ath10k_sdio_prep_async_req(ar, address, skb,
   1668						 NULL, true, eid);
   1669		if (ret)
   1670			return ret;
   1671	}
   1672
   1673	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
   1674
   1675	return 0;
   1676}
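/* The address arithmetic above makes the transfer end exactly at the top
 * of the endpoint's mbox address range; in this mbox design, a write that
 * touches the last mbox address marks the end of the message.
 */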
   1677
   1678static int ath10k_sdio_enable_intrs(struct ath10k *ar)
   1679{
   1680	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1681	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
   1682	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
   1683	int ret;
   1684
   1685	mutex_lock(&irq_data->mtx);
   1686
   1687	/* Enable all but CPU interrupts */
   1688	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
   1689			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
   1690			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
   1691
    1692	/* NOTE: in some cases HIF can detect pending mbox messages;
    1693	 * that detection is disabled for now.
    1694	 */
   1695	regs->int_status_en |=
   1696		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
   1697
    1698	/* Set up the CPU Interrupt Status Register; enable CPU sourced
    1699	 * interrupt #0, which is used to report assertions from the target.
    1700	 */
   1701	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
   1702
   1703	/* Set up the Error Interrupt status Register */
   1704	regs->err_int_status_en =
   1705		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
   1706		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
   1707
   1708	/* Enable Counter interrupt status register to get fatal errors for
   1709	 * debugging.
   1710	 */
   1711	regs->cntr_int_status_en =
   1712		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
   1713			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
   1714
   1715	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
   1716				&regs->int_status_en, sizeof(*regs));
   1717	if (ret)
   1718		ath10k_warn(ar,
   1719			    "failed to update mbox interrupt status register : %d\n",
   1720			    ret);
   1721
   1722	mutex_unlock(&irq_data->mtx);
   1723	return ret;
   1724}
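/* Note: the enable registers are laid out contiguously on the target,
 * which is why the single bulk write of sizeof(*regs) bytes starting at
 * MBOX_INT_STATUS_ENABLE_ADDRESS above programs int_status_en,
 * cpu_int_status_en, err_int_status_en and cntr_int_status_en in one
 * transfer.
 */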
   1725
   1726/* HIF diagnostics */
   1727
   1728static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
   1729				     size_t buf_len)
   1730{
   1731	int ret;
   1732	void *mem;
   1733
   1734	mem = kzalloc(buf_len, GFP_KERNEL);
   1735	if (!mem)
   1736		return -ENOMEM;
   1737
   1738	/* set window register to start read cycle */
   1739	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
   1740	if (ret) {
    1741		ath10k_warn(ar, "failed to set mbox window read address: %d\n", ret);
   1742		goto out;
   1743	}
   1744
   1745	/* read the data */
   1746	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
   1747	if (ret) {
   1748		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
   1749			    ret);
   1750		goto out;
   1751	}
   1752
   1753	memcpy(buf, mem, buf_len);
   1754
   1755out:
   1756	kfree(mem);
   1757
   1758	return ret;
   1759}
   1760
   1761static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
   1762				   u32 *value)
   1763{
   1764	__le32 *val;
   1765	int ret;
   1766
   1767	val = kzalloc(sizeof(*val), GFP_KERNEL);
   1768	if (!val)
   1769		return -ENOMEM;
   1770
   1771	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
   1772	if (ret)
   1773		goto out;
   1774
   1775	*value = __le32_to_cpu(*val);
   1776
   1777out:
   1778	kfree(val);
   1779
   1780	return ret;
   1781}
   1782
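        /* Diagnostic writes use the opposite order: the data is written to
         * the window data address first, and writing the target address to
         * the window write register then triggers the actual write cycle.
         */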
   1783static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
   1784					  const void *data, int nbytes)
   1785{
   1786	int ret;
   1787
   1788	/* set write data */
   1789	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
   1790	if (ret) {
   1791		ath10k_warn(ar,
   1792			    "failed to write 0x%p to mbox window data address: %d\n",
   1793			    data, ret);
   1794		return ret;
   1795	}
   1796
   1797	/* set window register, which starts the write cycle */
   1798	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
   1799	if (ret) {
    1800		ath10k_warn(ar, "failed to set mbox window write address: %d\n", ret);
   1801		return ret;
   1802	}
   1803
   1804	return 0;
   1805}
   1806
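        /* Read hi_acs_flags from the target to find out whether firmware
         * has acked the mailbox swap service and record the result in
         * ar_sdio->swap_mbox.
         */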
   1807static int ath10k_sdio_hif_start_post(struct ath10k *ar)
   1808{
   1809	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1810	u32 addr, val;
   1811	int ret = 0;
   1812
   1813	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
   1814
   1815	ret = ath10k_sdio_diag_read32(ar, addr, &val);
   1816	if (ret) {
    1817		ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
   1818		return ret;
   1819	}
   1820
   1821	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
   1822		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   1823			   "sdio mailbox swap service enabled\n");
   1824		ar_sdio->swap_mbox = true;
   1825	} else {
   1826		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   1827			   "sdio mailbox swap service disabled\n");
   1828		ar_sdio->swap_mbox = false;
   1829	}
   1830
   1831	ath10k_sdio_set_mbox_sleep(ar, true);
   1832
   1833	return 0;
   1834}
   1835
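        /* Check hi_acs_flags for the reduced HTT transmit complete
         * indication; a non-zero return means firmware has acked the
         * service.
         */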
   1836static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
   1837{
   1838	u32 addr, val;
   1839	int ret;
   1840
   1841	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
   1842
   1843	ret = ath10k_sdio_diag_read32(ar, addr, &val);
   1844	if (ret) {
   1845		ath10k_warn(ar,
    1846			    "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
   1847		return ret;
   1848	}
   1849
   1850	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
   1851
   1852	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
   1853		   ret ? " " : " not ");
   1854
   1855	return ret;
   1856}
   1857
   1858/* HIF start/stop */
   1859
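        /* Bring up the HIF: set up the HTC ctrl endpoint mailbox, register
         * the SDIO irq handler and enable the mbox interrupt sources.
         */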
   1860static int ath10k_sdio_hif_start(struct ath10k *ar)
   1861{
   1862	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1863	int ret;
   1864
   1865	ath10k_core_napi_enable(ar);
   1866
    1867	/* Sleep 20 ms before HIF interrupts are disabled.
    1868	 * This gives the target plenty of time to process the BMI done
    1869	 * request before interrupts are disabled.
    1870	 */
   1871	msleep(20);
   1872	ret = ath10k_sdio_disable_intrs(ar);
   1873	if (ret)
   1874		return ret;
   1875
   1876	/* eid 0 always uses the lower part of the extended mailbox address
   1877	 * space (ext_info[0].htc_ext_addr).
   1878	 */
   1879	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
   1880	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
   1881
   1882	sdio_claim_host(ar_sdio->func);
   1883
   1884	/* Register the isr */
    1885	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
   1886	if (ret) {
   1887		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
   1888		sdio_release_host(ar_sdio->func);
   1889		return ret;
   1890	}
   1891
   1892	sdio_release_host(ar_sdio->func);
   1893
   1894	ret = ath10k_sdio_enable_intrs(ar);
   1895	if (ret)
   1896		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
   1897
   1898	/* Enable sleep and then disable it again */
   1899	ret = ath10k_sdio_set_mbox_sleep(ar, true);
   1900	if (ret)
   1901		return ret;
   1902
    1903	/* Wait 20 ms for the written value to take effect */
   1904	msleep(20);
   1905
   1906	ret = ath10k_sdio_set_mbox_sleep(ar, false);
   1907	if (ret)
   1908		return ret;
   1909
   1910	return 0;
   1911}
   1912
   1913#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
   1914
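        /* Disable all mbox interrupts by queuing an async write of a zeroed
         * enable register block, wait (with timeout) for the request to
         * complete and then release the SDIO irq.
         */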
   1915static void ath10k_sdio_irq_disable(struct ath10k *ar)
   1916{
   1917	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1918	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
   1919	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
   1920	struct sk_buff *skb;
   1921	struct completion irqs_disabled_comp;
   1922	int ret;
   1923
   1924	skb = dev_alloc_skb(sizeof(*regs));
   1925	if (!skb)
   1926		return;
   1927
   1928	mutex_lock(&irq_data->mtx);
   1929
   1930	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
   1931	memcpy(skb->data, regs, sizeof(*regs));
   1932	skb_put(skb, sizeof(*regs));
   1933
   1934	mutex_unlock(&irq_data->mtx);
   1935
   1936	init_completion(&irqs_disabled_comp);
   1937	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
   1938					 skb, &irqs_disabled_comp, false, 0);
   1939	if (ret)
   1940		goto out;
   1941
   1942	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
   1943
   1944	/* Wait for the completion of the IRQ disable request.
   1945	 * If there is a timeout we will try to disable irq's anyway.
   1946	 */
   1947	ret = wait_for_completion_timeout(&irqs_disabled_comp,
   1948					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
   1949	if (!ret)
   1950		ath10k_warn(ar, "sdio irq disable request timed out\n");
   1951
   1952	sdio_claim_host(ar_sdio->func);
   1953
   1954	ret = sdio_release_irq(ar_sdio->func);
   1955	if (ret)
   1956		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
   1957
   1958	sdio_release_host(ar_sdio->func);
   1959
   1960out:
   1961	kfree_skb(skb);
   1962}
   1963
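        /* Tear down the HIF: disable interrupts, flush the RX and async
         * write workers and complete or free any bus requests still queued.
         */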
   1964static void ath10k_sdio_hif_stop(struct ath10k *ar)
   1965{
   1966	struct ath10k_sdio_bus_request *req, *tmp_req;
   1967	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   1968	struct sk_buff *skb;
   1969
   1970	ath10k_sdio_irq_disable(ar);
   1971
   1972	cancel_work_sync(&ar_sdio->async_work_rx);
   1973
   1974	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
   1975		dev_kfree_skb_any(skb);
   1976
   1977	cancel_work_sync(&ar_sdio->wr_async_work);
   1978
   1979	spin_lock_bh(&ar_sdio->wr_async_lock);
   1980
   1981	/* Free all bus requests that have not been handled */
   1982	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
   1983		struct ath10k_htc_ep *ep;
   1984
   1985		list_del(&req->list);
   1986
   1987		if (req->htc_msg) {
   1988			ep = &ar->htc.endpoint[req->eid];
   1989			ath10k_htc_notify_tx_completion(ep, req->skb);
   1990		} else if (req->skb) {
   1991			kfree_skb(req->skb);
   1992		}
   1993		ath10k_sdio_free_bus_req(ar, req);
   1994	}
   1995
   1996	spin_unlock_bh(&ar_sdio->wr_async_lock);
   1997
   1998	ath10k_core_napi_sync_disable(ar);
   1999}
   2000
   2001#ifdef CONFIG_PM
   2002
   2003static int ath10k_sdio_hif_suspend(struct ath10k *ar)
   2004{
   2005	return 0;
   2006}
   2007
   2008static int ath10k_sdio_hif_resume(struct ath10k *ar)
   2009{
   2010	switch (ar->state) {
   2011	case ATH10K_STATE_OFF:
   2012		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   2013			   "sdio resume configuring sdio\n");
   2014
   2015		/* need to set sdio settings after power is cut from sdio */
   2016		ath10k_sdio_config(ar);
   2017		break;
   2018
   2019	case ATH10K_STATE_ON:
   2020	default:
   2021		break;
   2022	}
   2023
   2024	return 0;
   2025}
   2026#endif
   2027
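        /* Map an HTC service to its UL/DL pipe and assign the endpoint's
         * mbox address and size, honoring the mailbox swap setting
         * negotiated with firmware.
         */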
   2028static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
   2029					       u16 service_id,
   2030					       u8 *ul_pipe, u8 *dl_pipe)
   2031{
   2032	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
   2033	struct ath10k_htc *htc = &ar->htc;
   2034	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
   2035	enum ath10k_htc_ep_id eid;
   2036	bool ep_found = false;
   2037	int i;
   2038
   2039	/* For sdio, we are interested in the mapping between eid
    2040	 * and pipeid rather than between service_id and pipe_id.
   2041	 * First we find out which eid has been allocated to the
   2042	 * service...
   2043	 */
   2044	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
   2045		if (htc->endpoint[i].service_id == service_id) {
   2046			eid = htc->endpoint[i].eid;
   2047			ep_found = true;
   2048			break;
   2049		}
   2050	}
   2051
   2052	if (!ep_found)
   2053		return -EINVAL;
   2054
   2055	/* Then we create the simplest mapping possible between pipeid
   2056	 * and eid
   2057	 */
   2058	*ul_pipe = *dl_pipe = (u8)eid;
   2059
   2060	/* Normally, HTT will use the upper part of the extended
   2061	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
   2062	 * the lower part (ext_info[0].htc_ext_addr).
   2063	 * If fw wants swapping of mailbox addresses, the opposite is true.
   2064	 */
   2065	if (ar_sdio->swap_mbox) {
   2066		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
   2067		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
   2068		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
   2069		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
   2070	} else {
   2071		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
   2072		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
   2073		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
   2074		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
   2075	}
   2076
   2077	switch (service_id) {
   2078	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
   2079		/* HTC ctrl ep mbox address has already been setup in
   2080		 * ath10k_sdio_hif_start
   2081		 */
   2082		break;
   2083	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
   2084		ar_sdio->mbox_addr[eid] = wmi_addr;
   2085		ar_sdio->mbox_size[eid] = wmi_mbox_size;
   2086		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   2087			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
   2088			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
   2089		break;
   2090	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
   2091		ar_sdio->mbox_addr[eid] = htt_addr;
   2092		ar_sdio->mbox_size[eid] = htt_mbox_size;
   2093		ath10k_dbg(ar, ATH10K_DBG_SDIO,
   2094			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
   2095			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
   2096		break;
   2097	default:
   2098		ath10k_warn(ar, "unsupported HTC service id: %d\n",
   2099			    service_id);
   2100		return -EINVAL;
   2101	}
   2102
   2103	return 0;
   2104}
   2105
   2106static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
   2107					     u8 *ul_pipe, u8 *dl_pipe)
   2108{
   2109	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
   2110
   2111	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
   2112	 * case) == 0
   2113	 */
   2114	*ul_pipe = 0;
   2115	*dl_pipe = 0;
   2116}
   2117
   2118static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
   2119	.tx_sg			= ath10k_sdio_hif_tx_sg,
   2120	.diag_read		= ath10k_sdio_hif_diag_read,
   2121	.diag_write		= ath10k_sdio_hif_diag_write_mem,
   2122	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
   2123	.start			= ath10k_sdio_hif_start,
   2124	.stop			= ath10k_sdio_hif_stop,
   2125	.start_post		= ath10k_sdio_hif_start_post,
   2126	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
   2127	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
   2128	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
   2129	.power_up		= ath10k_sdio_hif_power_up,
   2130	.power_down		= ath10k_sdio_hif_power_down,
   2131#ifdef CONFIG_PM
   2132	.suspend		= ath10k_sdio_hif_suspend,
   2133	.resume			= ath10k_sdio_hif_resume,
   2134#endif
   2135};
   2136
   2137#ifdef CONFIG_PM_SLEEP
   2138
    2139/* Empty handlers so that the mmc subsystem doesn't remove us entirely during
    2140 * suspend. We instead follow the cfg80211 suspend/resume handlers.
   2141 */
   2142static int ath10k_sdio_pm_suspend(struct device *device)
   2143{
   2144	struct sdio_func *func = dev_to_sdio_func(device);
   2145	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
   2146	struct ath10k *ar = ar_sdio->ar;
   2147	mmc_pm_flag_t pm_flag, pm_caps;
   2148	int ret;
   2149
   2150	if (!device_may_wakeup(ar->dev))
   2151		return 0;
   2152
   2153	ath10k_sdio_set_mbox_sleep(ar, true);
   2154
   2155	pm_flag = MMC_PM_KEEP_POWER;
   2156
   2157	ret = sdio_set_host_pm_flags(func, pm_flag);
   2158	if (ret) {
   2159		pm_caps = sdio_get_host_pm_caps(func);
   2160		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
   2161			    pm_flag, pm_caps, ret);
   2162		return ret;
   2163	}
   2164
   2165	return ret;
   2166}
   2167
   2168static int ath10k_sdio_pm_resume(struct device *device)
   2169{
   2170	return 0;
   2171}
   2172
   2173static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
   2174			 ath10k_sdio_pm_resume);
   2175
   2176#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
   2177
   2178#else
   2179
   2180#define ATH10K_SDIO_PM_OPS NULL
   2181
   2182#endif /* CONFIG_PM_SLEEP */
   2183
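        /* NAPI poll handler: pass received high latency indications on to
         * HTT and complete the poll when less than the full budget was
         * consumed.
         */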
   2184static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
   2185{
   2186	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
   2187	int done;
   2188
   2189	done = ath10k_htt_rx_hl_indication(ar, budget);
    2190	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget: %d\n", done, budget);
   2191
   2192	if (done < budget)
   2193		napi_complete_done(ctx, done);
   2194
   2195	return done;
   2196}
   2197
   2198static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
   2199						u32 item_offset,
   2200						u32 *val)
   2201{
   2202	u32 addr;
   2203	int ret;
   2204
   2205	addr = host_interest_item_address(item_offset);
   2206
   2207	ret = ath10k_sdio_diag_read32(ar, addr, val);
   2208
   2209	if (ret)
   2210		ath10k_warn(ar, "unable to read host interest offset %d value\n",
   2211			    item_offset);
   2212
   2213	return ret;
   2214}
   2215
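        /* Read target memory one 32-bit word at a time through the diag
         * window; buf_len is expected to be a multiple of 4.
         */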
   2216static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
   2217				u32 buf_len)
   2218{
   2219	u32 val;
    2220	int i, ret = 0;
   2221
   2222	for (i = 0; i < buf_len; i += 4) {
   2223		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
   2224		if (ret) {
    2225			ath10k_warn(ar, "unable to read mem 0x%x: %d\n", address + i, ret);
   2226			break;
   2227		}
   2228		memcpy(buf + i, &val, 4);
   2229	}
   2230
   2231	return ret;
   2232}
   2233
   2234static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
   2235{
    2236	u32 param = 0;
   2237
   2238	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
   2239
   2240	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
   2241
   2242	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
   2243}
   2244
   2245static void ath10k_sdio_dump_registers(struct ath10k *ar,
   2246				       struct ath10k_fw_crash_data *crash_data,
   2247				       bool fast_dump)
   2248{
   2249	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
   2250	int i, ret;
   2251	u32 reg_dump_area;
   2252
   2253	ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
   2254						   &reg_dump_area);
   2255	if (ret) {
   2256		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
   2257		return;
   2258	}
   2259
   2260	if (fast_dump)
   2261		ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
   2262					     sizeof(reg_dump_values));
   2263	else
   2264		ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
   2265					   sizeof(reg_dump_values));
   2266
   2267	if (ret) {
   2268		ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
   2269		return;
   2270	}
   2271
   2272	ath10k_err(ar, "firmware register dump:\n");
   2273	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
   2274		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
   2275			   i,
   2276			   reg_dump_values[i],
   2277			   reg_dump_values[i + 1],
   2278			   reg_dump_values[i + 2],
   2279			   reg_dump_values[i + 3]);
   2280
   2281	if (!crash_data)
   2282		return;
   2283
   2284	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
   2285		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
   2286}
   2287
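        /* Dump one memory region section by section, filling the gaps
         * between sections with ATH10K_MAGIC_NOT_COPIED. Returns the number
         * of bytes written to buf.
         */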
   2288static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
   2289					   const struct ath10k_mem_region *mem_region,
   2290					   u8 *buf, size_t buf_len)
   2291{
   2292	const struct ath10k_mem_section *cur_section, *next_section;
   2293	unsigned int count, section_size, skip_size;
   2294	int ret, i, j;
   2295
   2296	if (!mem_region || !buf)
   2297		return 0;
   2298
   2299	cur_section = &mem_region->section_table.sections[0];
   2300
   2301	if (mem_region->start > cur_section->start) {
   2302		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
   2303			    mem_region->start, cur_section->start);
   2304		return 0;
   2305	}
   2306
   2307	skip_size = cur_section->start - mem_region->start;
   2308
   2309	/* fill the gap between the first register section and register
   2310	 * start address
   2311	 */
   2312	for (i = 0; i < skip_size; i++) {
   2313		*buf = ATH10K_MAGIC_NOT_COPIED;
   2314		buf++;
   2315	}
   2316
   2317	count = 0;
   2318	i = 0;
   2319	for (; cur_section; cur_section = next_section) {
    2320		if (cur_section->end <= cur_section->start) {
    2321			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
    2322				    cur_section->start,
    2323				    cur_section->end);
    2324			break;
    2325		}
    2326
    2327		section_size = cur_section->end - cur_section->start;
   2328
   2329		if (++i == mem_region->section_table.size) {
   2330			/* last section */
   2331			next_section = NULL;
   2332			skip_size = 0;
   2333		} else {
   2334			next_section = cur_section + 1;
   2335
   2336			if (cur_section->end > next_section->start) {
   2337				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
   2338					    next_section->start,
   2339					    cur_section->end);
   2340				break;
   2341			}
   2342
   2343			skip_size = next_section->start - cur_section->end;
   2344		}
   2345
   2346		if (buf_len < (skip_size + section_size)) {
   2347			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
   2348			break;
   2349		}
   2350
   2351		buf_len -= skip_size + section_size;
   2352
   2353		/* read section to dest memory */
   2354		ret = ath10k_sdio_read_mem(ar, cur_section->start,
   2355					   buf, section_size);
   2356		if (ret) {
   2357			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
   2358				    cur_section->start, ret);
   2359			break;
   2360		}
   2361
   2362		buf += section_size;
   2363		count += section_size;
   2364
   2365		/* fill in the gap between this section and the next */
   2366		for (j = 0; j < skip_size; j++) {
   2367			*buf = ATH10K_MAGIC_NOT_COPIED;
   2368			buf++;
   2369		}
   2370
   2371		count += skip_size;
   2372	}
   2373
   2374	return count;
   2375}
   2376
    2377/* Returns < 0 if an error happened, otherwise the number of bytes dumped */
   2378static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
   2379					   const struct ath10k_mem_region *current_region,
   2380					   u8 *buf,
   2381					   bool fast_dump)
   2382{
   2383	int ret;
   2384
   2385	if (current_region->section_table.size > 0)
   2386		/* Copy each section individually. */
   2387		return ath10k_sdio_dump_memory_section(ar,
   2388						      current_region,
   2389						      buf,
   2390						      current_region->len);
   2391
    2392	/* No individual memory sections defined so we can
   2393	 * copy the entire memory region.
   2394	 */
   2395	if (fast_dump)
   2396		ret = ath10k_bmi_read_memory(ar,
   2397					     current_region->start,
   2398					     buf,
   2399					     current_region->len);
   2400	else
   2401		ret = ath10k_sdio_read_mem(ar,
   2402					   current_region->start,
   2403					   buf,
   2404					   current_region->len);
   2405
   2406	if (ret) {
   2407		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
   2408			    current_region->name, ret);
   2409		return ret;
   2410	}
   2411
   2412	return current_region->len;
   2413}
   2414
   2415static void ath10k_sdio_dump_memory(struct ath10k *ar,
   2416				    struct ath10k_fw_crash_data *crash_data,
   2417				    bool fast_dump)
   2418{
   2419	const struct ath10k_hw_mem_layout *mem_layout;
   2420	const struct ath10k_mem_region *current_region;
   2421	struct ath10k_dump_ram_data_hdr *hdr;
   2422	u32 count;
   2423	size_t buf_len;
   2424	int ret, i;
   2425	u8 *buf;
   2426
   2427	if (!crash_data)
   2428		return;
   2429
   2430	mem_layout = ath10k_coredump_get_mem_layout(ar);
   2431	if (!mem_layout)
   2432		return;
   2433
   2434	current_region = &mem_layout->region_table.regions[0];
   2435
   2436	buf = crash_data->ramdump_buf;
   2437	buf_len = crash_data->ramdump_buf_len;
   2438
   2439	memset(buf, 0, buf_len);
   2440
   2441	for (i = 0; i < mem_layout->region_table.size; i++) {
   2442		count = 0;
   2443
   2444		if (current_region->len > buf_len) {
    2445			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
   2446				    current_region->name,
   2447				    current_region->len,
   2448				    buf_len);
   2449			break;
   2450		}
   2451
   2452		/* Reserve space for the header. */
   2453		hdr = (void *)buf;
   2454		buf += sizeof(*hdr);
   2455		buf_len -= sizeof(*hdr);
   2456
   2457		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
   2458						      fast_dump);
   2459		if (ret >= 0)
   2460			count = ret;
   2461
   2462		hdr->region_type = cpu_to_le32(current_region->type);
   2463		hdr->start = cpu_to_le32(current_region->start);
   2464		hdr->length = cpu_to_le32(count);
   2465
   2466		if (count == 0)
   2467			/* Note: the header remains, just with zero length. */
   2468			break;
   2469
   2470		buf += count;
   2471		buf_len -= count;
   2472
   2473		current_region++;
   2474	}
   2475}
   2476
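        /* Crash dump entry point: collect the firmware register dump and
         * memory regions into a new coredump, using fast BMI reads when
         * supported, and then start recovery.
         */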
   2477void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
   2478{
   2479	struct ath10k_fw_crash_data *crash_data;
   2480	char guid[UUID_STRING_LEN + 1];
   2481	bool fast_dump;
   2482
   2483	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
   2484
   2485	if (fast_dump)
   2486		ath10k_bmi_start(ar);
   2487
   2488	ar->stats.fw_crash_counter++;
   2489
   2490	ath10k_sdio_disable_intrs(ar);
   2491
   2492	crash_data = ath10k_coredump_new(ar);
   2493
   2494	if (crash_data)
   2495		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
   2496	else
   2497		scnprintf(guid, sizeof(guid), "n/a");
   2498
   2499	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
   2500	ath10k_print_driver_info(ar);
   2501	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
   2502	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
   2503
   2504	ath10k_sdio_enable_intrs(ar);
   2505
   2506	ath10k_core_start_recovery(ar);
   2507}
   2508
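        /* SDIO probe: allocate the core and the per-bus buffers, set up the
         * async work queues, validate the SDIO device id and register with
         * the ath10k core.
         */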
   2509static int ath10k_sdio_probe(struct sdio_func *func,
   2510			     const struct sdio_device_id *id)
   2511{
   2512	struct ath10k_sdio *ar_sdio;
   2513	struct ath10k *ar;
   2514	enum ath10k_hw_rev hw_rev;
   2515	u32 dev_id_base;
   2516	struct ath10k_bus_params bus_params = {};
   2517	int ret, i;
   2518
   2519	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
    2520	 * If newer chipsets appear that do not use the hw reg
    2521	 * setup defined in qca6174_regs and qca6174_values, this
    2522	 * assumption is no longer valid and hw_rev must be set up
    2523	 * differently depending on the chipset.
    2524	 */
   2525	hw_rev = ATH10K_HW_QCA6174;
   2526
   2527	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
   2528				hw_rev, &ath10k_sdio_hif_ops);
   2529	if (!ar) {
   2530		dev_err(&func->dev, "failed to allocate core\n");
   2531		return -ENOMEM;
   2532	}
   2533
   2534	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
   2535		       NAPI_POLL_WEIGHT);
   2536
   2537	ath10k_dbg(ar, ATH10K_DBG_BOOT,
   2538		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
   2539		   func->num, func->vendor, func->device,
   2540		   func->max_blksize, func->cur_blksize);
   2541
   2542	ar_sdio = ath10k_sdio_priv(ar);
   2543
   2544	ar_sdio->irq_data.irq_proc_reg =
   2545		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
   2546			     GFP_KERNEL);
   2547	if (!ar_sdio->irq_data.irq_proc_reg) {
   2548		ret = -ENOMEM;
   2549		goto err_core_destroy;
   2550	}
   2551
   2552	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
   2553	if (!ar_sdio->vsg_buffer) {
   2554		ret = -ENOMEM;
   2555		goto err_core_destroy;
   2556	}
   2557
   2558	ar_sdio->irq_data.irq_en_reg =
   2559		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
   2560			     GFP_KERNEL);
   2561	if (!ar_sdio->irq_data.irq_en_reg) {
   2562		ret = -ENOMEM;
   2563		goto err_core_destroy;
   2564	}
   2565
   2566	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
   2567	if (!ar_sdio->bmi_buf) {
   2568		ret = -ENOMEM;
   2569		goto err_core_destroy;
   2570	}
   2571
   2572	ar_sdio->func = func;
   2573	sdio_set_drvdata(func, ar_sdio);
   2574
   2575	ar_sdio->is_disabled = true;
   2576	ar_sdio->ar = ar;
   2577
   2578	spin_lock_init(&ar_sdio->lock);
   2579	spin_lock_init(&ar_sdio->wr_async_lock);
   2580	mutex_init(&ar_sdio->irq_data.mtx);
   2581
   2582	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
   2583	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
   2584
   2585	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
   2586	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
   2587	if (!ar_sdio->workqueue) {
   2588		ret = -ENOMEM;
   2589		goto err_core_destroy;
   2590	}
   2591
   2592	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
   2593		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
   2594
   2595	skb_queue_head_init(&ar_sdio->rx_head);
   2596	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
   2597
   2598	dev_id_base = (id->device & 0x0F00);
   2599	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
   2600	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
   2601		ret = -ENODEV;
   2602		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
   2603			   dev_id_base, id->device);
   2604		goto err_free_wq;
   2605	}
   2606
   2607	ar->dev_id = QCA9377_1_0_DEVICE_ID;
   2608	ar->id.vendor = id->vendor;
   2609	ar->id.device = id->device;
   2610
   2611	ath10k_sdio_set_mbox_info(ar);
   2612
   2613	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
   2614	/* TODO: don't know yet how to get chip_id with SDIO */
   2615	bus_params.chip_id = 0;
   2616	bus_params.hl_msdu_ids = true;
   2617
   2618	ar->hw->max_mtu = ETH_DATA_LEN;
   2619
   2620	ret = ath10k_core_register(ar, &bus_params);
   2621	if (ret) {
   2622		ath10k_err(ar, "failed to register driver core: %d\n", ret);
   2623		goto err_free_wq;
   2624	}
   2625
   2626	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
   2627
   2628	return 0;
   2629
   2630err_free_wq:
   2631	destroy_workqueue(ar_sdio->workqueue);
   2632err_core_destroy:
   2633	ath10k_core_destroy(ar);
   2634
   2635	return ret;
   2636}
   2637
   2638static void ath10k_sdio_remove(struct sdio_func *func)
   2639{
   2640	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
   2641	struct ath10k *ar = ar_sdio->ar;
   2642
   2643	ath10k_dbg(ar, ATH10K_DBG_BOOT,
   2644		   "sdio removed func %d vendor 0x%x device 0x%x\n",
   2645		   func->num, func->vendor, func->device);
   2646
   2647	ath10k_core_unregister(ar);
   2648
   2649	netif_napi_del(&ar->napi);
   2650
   2651	ath10k_core_destroy(ar);
   2652
   2653	destroy_workqueue(ar_sdio->workqueue);
   2654}
   2655
   2656static const struct sdio_device_id ath10k_sdio_devices[] = {
   2657	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
   2658	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
   2659	{},
   2660};
   2661
   2662MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
   2663
   2664static struct sdio_driver ath10k_sdio_driver = {
   2665	.name = "ath10k_sdio",
   2666	.id_table = ath10k_sdio_devices,
   2667	.probe = ath10k_sdio_probe,
   2668	.remove = ath10k_sdio_remove,
   2669	.drv = {
   2670		.owner = THIS_MODULE,
   2671		.pm = ATH10K_SDIO_PM_OPS,
   2672	},
   2673};
   2674
   2675static int __init ath10k_sdio_init(void)
   2676{
   2677	int ret;
   2678
   2679	ret = sdio_register_driver(&ath10k_sdio_driver);
   2680	if (ret)
   2681		pr_err("sdio driver registration failed: %d\n", ret);
   2682
   2683	return ret;
   2684}
   2685
   2686static void __exit ath10k_sdio_exit(void)
   2687{
   2688	sdio_unregister_driver(&ath10k_sdio_driver);
   2689}
   2690
   2691module_init(ath10k_sdio_init);
   2692module_exit(ath10k_sdio_exit);
   2693
   2694MODULE_AUTHOR("Qualcomm Atheros");
   2695MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
   2696MODULE_LICENSE("Dual BSD/GPL");