cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

htc_mbox.c (76308B)


      1/*
      2 * Copyright (c) 2007-2011 Atheros Communications Inc.
      3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
      4 *
      5 * Permission to use, copy, modify, and/or distribute this software for any
      6 * purpose with or without fee is hereby granted, provided that the above
      7 * copyright notice and this permission notice appear in all copies.
      8 *
      9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     16 */
     17
     18#include "core.h"
     19#include "hif.h"
     20#include "debug.h"
     21#include "hif-ops.h"
     22#include "trace.h"
     23
     24#include <asm/unaligned.h>
     25
     26#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
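        /*
         * __ALIGN_MASK(x, m) expands to ((x) + (m)) & ~(m); with block_mask
         * assumed to be the HIF block size minus one, a 90-byte message on
         * a device with 128-byte blocks pads to (90 + 127) & ~127 = 128.
         */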
     27
     28static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
     29static void ath6kl_htc_mbox_stop(struct htc_target *target);
     30static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
     31					      struct list_head *pkt_queue);
     32static void ath6kl_htc_set_credit_dist(struct htc_target *target,
     33				       struct ath6kl_htc_credit_info *cred_info,
     34				       u16 svc_pri_order[], int len);
     35
      36/* threshold to re-enable Tx bundling for an AC */
     37#define TX_RESUME_BUNDLE_THRESHOLD	1500
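        /*
         * This threshold counts consecutive TX passes on an AC without any
         * bundling (tracked in ac_tx_count further below), not bytes.
         */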
     38
     39/* Functions for Tx credit handling */
     40static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
     41				  struct htc_endpoint_credit_dist *ep_dist,
     42				  int credits)
     43{
     44	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
     45		   ep_dist->endpoint, credits);
     46
     47	ep_dist->credits += credits;
     48	ep_dist->cred_assngd += credits;
     49	cred_info->cur_free_credits -= credits;
     50}
     51
     52static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
     53			       struct list_head *ep_list,
     54			       int tot_credits)
     55{
     56	struct htc_endpoint_credit_dist *cur_ep_dist;
     57	int count;
     58
     59	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
     60
     61	cred_info->cur_free_credits = tot_credits;
     62	cred_info->total_avail_credits = tot_credits;
     63
     64	list_for_each_entry(cur_ep_dist, ep_list, list) {
     65		if (cur_ep_dist->endpoint == ENDPOINT_0)
     66			continue;
     67
     68		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
     69
     70		if (tot_credits > 4) {
     71			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
     72			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
     73				ath6kl_credit_deposit(cred_info,
     74						      cur_ep_dist,
     75						      cur_ep_dist->cred_min);
     76				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
     77			}
     78		}
     79
     80		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
     81			ath6kl_credit_deposit(cred_info, cur_ep_dist,
     82					      cur_ep_dist->cred_min);
      83			/*
      84			 * Control service is always marked active; it
      85			 * never goes inactive EVER.
      86			 */
     87			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
     88		}
     89
      90		/*
      91		 * Streams have to be created (explicitly or implicitly) for
      92		 * all kinds of traffic. BE endpoints are also inactive in the
      93		 * beginning. When BE traffic starts it creates implicit
      94		 * streams that redistribute credits.
      95		 *
      96		 * Note: all other endpoints have minimums set but are
      97		 * initially given NO credits. Credits will be distributed
      98		 * as traffic activity demands.
      99		 */
    100	}
    101
     102	/*
     103	 * ath6kl_credit_seek() uses list_for_each_entry_reverse() to walk
     104	 * the whole ep list. Therefore assign this lowestpri_ep_dist only
     105	 * after walking the ep_list.
     106	 */
    107	cred_info->lowestpri_ep_dist = cur_ep_dist->list;
    108
    109	WARN_ON(cred_info->cur_free_credits <= 0);
    110
    111	list_for_each_entry(cur_ep_dist, ep_list, list) {
    112		if (cur_ep_dist->endpoint == ENDPOINT_0)
    113			continue;
    114
    115		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
    116			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
    117		} else {
     118			/*
     119			 * For the remaining data endpoints, we assume that
     120			 * each endpoint's cred_per_msg is the same. We use
     121			 * a simple calculation here: take the remaining
     122			 * credits, determine how many whole messages they
     123			 * can cover, and then set each endpoint's normal
     124			 * value equal to 3/4 of this amount.
     125			 */
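			/*
			 * Illustrative arithmetic (values assumed, not from
			 * the driver): with cur_free_credits = 26 and
			 * cred_per_msg = 6, count = (26 / 6) * 6 = 24, then
			 * (24 * 3) >> 2 = 18, so cred_norm becomes 18, which
			 * also satisfies max(count, cred_per_msg).
			 */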
    126			count = (cred_info->cur_free_credits /
    127				 cur_ep_dist->cred_per_msg)
    128				* cur_ep_dist->cred_per_msg;
    129			count = (count * 3) >> 2;
    130			count = max(count, cur_ep_dist->cred_per_msg);
    131			cur_ep_dist->cred_norm = count;
    132		}
    133
    134		ath6kl_dbg(ATH6KL_DBG_CREDIT,
    135			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
    136			   cur_ep_dist->endpoint,
    137			   cur_ep_dist->svc_id,
    138			   cur_ep_dist->credits,
    139			   cur_ep_dist->cred_per_msg,
    140			   cur_ep_dist->cred_norm,
    141			   cur_ep_dist->cred_min);
    142	}
    143}
    144
    145/* initialize and setup credit distribution */
    146static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
    147			       struct ath6kl_htc_credit_info *cred_info)
    148{
    149	u16 servicepriority[5];
    150
    151	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
    152
    153	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
    154	servicepriority[1] = WMI_DATA_VO_SVC;
    155	servicepriority[2] = WMI_DATA_VI_SVC;
    156	servicepriority[3] = WMI_DATA_BE_SVC;
    157	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
    158
    159	/* set priority list */
    160	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
    161
    162	return 0;
    163}
    164
    165/* reduce an ep's credits back to a set limit */
    166static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
    167				 struct htc_endpoint_credit_dist *ep_dist,
    168				 int limit)
    169{
    170	int credits;
    171
    172	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
    173		   ep_dist->endpoint, limit);
    174
    175	ep_dist->cred_assngd = limit;
    176
    177	if (ep_dist->credits <= limit)
    178		return;
    179
    180	credits = ep_dist->credits - limit;
    181	ep_dist->credits -= credits;
    182	cred_info->cur_free_credits += credits;
    183}
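        /*
         * Example of the accounting above (values assumed): an endpoint
         * holding 10 credits reduced to a limit of 4 returns 6 credits to
         * cur_free_credits.
         */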
    184
    185static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
    186				 struct list_head *epdist_list)
    187{
    188	struct htc_endpoint_credit_dist *cur_list;
    189
    190	list_for_each_entry(cur_list, epdist_list, list) {
    191		if (cur_list->endpoint == ENDPOINT_0)
    192			continue;
    193
    194		if (cur_list->cred_to_dist > 0) {
    195			cur_list->credits += cur_list->cred_to_dist;
    196			cur_list->cred_to_dist = 0;
    197
    198			if (cur_list->credits > cur_list->cred_assngd)
    199				ath6kl_credit_reduce(cred_info,
    200						     cur_list,
    201						     cur_list->cred_assngd);
    202
    203			if (cur_list->credits > cur_list->cred_norm)
    204				ath6kl_credit_reduce(cred_info, cur_list,
    205						     cur_list->cred_norm);
    206
    207			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
    208				if (cur_list->txq_depth == 0)
    209					ath6kl_credit_reduce(cred_info,
    210							     cur_list, 0);
    211			}
    212		}
    213	}
    214}
    215
    216/*
    217 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
    218 * question.
    219 */
    220static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
    221				struct htc_endpoint_credit_dist *ep_dist)
    222{
    223	struct htc_endpoint_credit_dist *curdist_list;
    224	int credits = 0;
    225	int need;
    226
    227	if (ep_dist->svc_id == WMI_CONTROL_SVC)
    228		goto out;
    229
    230	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
    231	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
    232		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
    233			goto out;
    234
    235	/*
    236	 * For all other services, we follow a simple algorithm of:
    237	 *
    238	 * 1. checking the free pool for credits
    239	 * 2. checking lower priority endpoints for credits to take
    240	 */
    241
    242	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
    243
    244	if (credits >= ep_dist->seek_cred)
    245		goto out;
    246
     247	/*
     248	 * We don't have enough in the free pool, so try taking away from
     249	 * lower priority services. The rules for taking away credits:
     250	 *
     251	 *   1. Only take from lower priority endpoints.
     252	 *   2. Only take what is allocated above the minimum (never
     253	 *      starve an endpoint completely).
     254	 *   3. Only take what you need.
     255	 */
    256
    257	list_for_each_entry_reverse(curdist_list,
    258				    &cred_info->lowestpri_ep_dist,
    259				    list) {
    260		if (curdist_list == ep_dist)
    261			break;
    262
    263		need = ep_dist->seek_cred - cred_info->cur_free_credits;
    264
    265		if ((curdist_list->cred_assngd - need) >=
    266		     curdist_list->cred_min) {
     267			/*
     268			 * The current one has been allocated more than
     269			 * its minimum and has enough credits assigned
     270			 * above that minimum to fulfill our need, so
     271			 * take away just enough to fulfill our need.
     272			 */
    273			ath6kl_credit_reduce(cred_info, curdist_list,
    274					     curdist_list->cred_assngd - need);
    275
    276			if (cred_info->cur_free_credits >=
    277			    ep_dist->seek_cred)
    278				break;
    279		}
    280
    281		if (curdist_list->endpoint == ENDPOINT_0)
    282			break;
    283	}
    284
    285	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
    286
    287out:
    288	/* did we find some credits? */
    289	if (credits)
    290		ath6kl_credit_deposit(cred_info, ep_dist, credits);
    291
    292	ep_dist->seek_cred = 0;
    293}
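        /*
         * Worked example (values assumed): seek_cred = 6 with only 2 free
         * credits leaves need = 4. A lower-priority endpoint with
         * cred_assngd = 10 and cred_min = 4 can be reduced to 6, returning
         * 4 credits to the pool; the requester is then deposited
         * min(6, 6) = 6 credits.
         */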
    294
    295/* redistribute credits based on activity change */
    296static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
    297				       struct list_head *ep_dist_list)
    298{
    299	struct htc_endpoint_credit_dist *curdist_list;
    300
    301	list_for_each_entry(curdist_list, ep_dist_list, list) {
    302		if (curdist_list->endpoint == ENDPOINT_0)
    303			continue;
    304
    305		if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
    306		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
    307			curdist_list->dist_flags |= HTC_EP_ACTIVE;
    308
    309		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
    310		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
    311			if (curdist_list->txq_depth == 0)
    312				ath6kl_credit_reduce(info, curdist_list, 0);
    313			else
    314				ath6kl_credit_reduce(info,
    315						     curdist_list,
    316						     curdist_list->cred_min);
    317		}
    318	}
    319}
    320
     321/*
     322 * This function is invoked whenever endpoints require credit
     323 * distributions. A lock is held while this function is invoked;
     324 * this function shall NOT block.
     325 *
     326 * The ep_dist_list is a list of distribution structures in
     327 * prioritized order, as defined by the call to htc_set_credit_dist().
     328 */
    329static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
    330				     struct list_head *ep_dist_list,
    331			      enum htc_credit_dist_reason reason)
    332{
    333	switch (reason) {
    334	case HTC_CREDIT_DIST_SEND_COMPLETE:
    335		ath6kl_credit_update(cred_info, ep_dist_list);
    336		break;
    337	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
    338		ath6kl_credit_redistribute(cred_info, ep_dist_list);
    339		break;
    340	default:
    341		break;
    342	}
    343
    344	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
    345	WARN_ON(cred_info->cur_free_credits < 0);
    346}
    347
    348static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
    349{
    350	u8 *align_addr;
    351
    352	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
    353		align_addr = PTR_ALIGN(*buf - 4, 4);
    354		memmove(align_addr, *buf, len);
    355		*buf = align_addr;
    356	}
    357}
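        /*
         * PTR_ALIGN(*buf - 4, 4) yields the highest 4-byte boundary below
         * *buf, at most 3 bytes lower, so the memmove needs only that much
         * headroom. E.g. (address assumed) a buffer ending in ...0x06
         * moves down to ...0x04.
         */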
    358
    359static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
    360				   int ctrl0, int ctrl1)
    361{
    362	struct htc_frame_hdr *hdr;
    363
    364	packet->buf -= HTC_HDR_LENGTH;
     365	hdr = (struct htc_frame_hdr *)packet->buf;
    366
     367	/* Endianness? */
    368	put_unaligned((u16)packet->act_len, &hdr->payld_len);
    369	hdr->flags = flags;
    370	hdr->eid = packet->endpoint;
    371	hdr->ctrl[0] = ctrl0;
    372	hdr->ctrl[1] = ctrl1;
    373}
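        /*
         * The header is written into the HTC_HDR_LENGTH bytes of headroom
         * reserved in front of the payload, which is why packet->buf is
         * rewound here rather than the payload being copied.
         */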
    374
    375static void htc_reclaim_txctrl_buf(struct htc_target *target,
    376				   struct htc_packet *pkt)
    377{
    378	spin_lock_bh(&target->htc_lock);
    379	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
    380	spin_unlock_bh(&target->htc_lock);
    381}
    382
    383static struct htc_packet *htc_get_control_buf(struct htc_target *target,
    384					      bool tx)
    385{
    386	struct htc_packet *packet = NULL;
    387	struct list_head *buf_list;
    388
    389	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
    390
    391	spin_lock_bh(&target->htc_lock);
    392
    393	if (list_empty(buf_list)) {
    394		spin_unlock_bh(&target->htc_lock);
    395		return NULL;
    396	}
    397
    398	packet = list_first_entry(buf_list, struct htc_packet, list);
    399	list_del(&packet->list);
    400	spin_unlock_bh(&target->htc_lock);
    401
    402	if (tx)
    403		packet->buf = packet->buf_start + HTC_HDR_LENGTH;
    404
    405	return packet;
    406}
    407
    408static void htc_tx_comp_update(struct htc_target *target,
    409			       struct htc_endpoint *endpoint,
    410			       struct htc_packet *packet)
    411{
    412	packet->completion = NULL;
    413	packet->buf += HTC_HDR_LENGTH;
    414
    415	if (!packet->status)
    416		return;
    417
    418	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
    419		   packet->status, packet->endpoint, packet->act_len,
    420		   packet->info.tx.cred_used);
    421
    422	/* on failure to submit, reclaim credits for this packet */
    423	spin_lock_bh(&target->tx_lock);
    424	endpoint->cred_dist.cred_to_dist +=
    425				packet->info.tx.cred_used;
    426	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
    427
    428	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
    429		   target->credit_info, &target->cred_dist_list);
    430
    431	ath6kl_credit_distribute(target->credit_info,
    432				 &target->cred_dist_list,
    433				 HTC_CREDIT_DIST_SEND_COMPLETE);
    434
    435	spin_unlock_bh(&target->tx_lock);
    436}
    437
    438static void htc_tx_complete(struct htc_endpoint *endpoint,
    439			    struct list_head *txq)
    440{
    441	if (list_empty(txq))
    442		return;
    443
    444	ath6kl_dbg(ATH6KL_DBG_HTC,
    445		   "htc tx complete ep %d pkts %d\n",
    446		   endpoint->eid, get_queue_depth(txq));
    447
    448	ath6kl_tx_complete(endpoint->target, txq);
    449}
    450
    451static void htc_tx_comp_handler(struct htc_target *target,
    452				struct htc_packet *packet)
    453{
    454	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
    455	struct list_head container;
    456
    457	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
    458		   packet->info.tx.seqno);
    459
    460	htc_tx_comp_update(target, endpoint, packet);
    461	INIT_LIST_HEAD(&container);
    462	list_add_tail(&packet->list, &container);
    463	/* do completion */
    464	htc_tx_complete(endpoint, &container);
    465}
    466
    467static void htc_async_tx_scat_complete(struct htc_target *target,
    468				       struct hif_scatter_req *scat_req)
    469{
    470	struct htc_endpoint *endpoint;
    471	struct htc_packet *packet;
    472	struct list_head tx_compq;
    473	int i;
    474
    475	INIT_LIST_HEAD(&tx_compq);
    476
    477	ath6kl_dbg(ATH6KL_DBG_HTC,
    478		   "htc tx scat complete len %d entries %d\n",
    479		   scat_req->len, scat_req->scat_entries);
    480
    481	if (scat_req->status)
    482		ath6kl_err("send scatter req failed: %d\n", scat_req->status);
    483
    484	packet = scat_req->scat_list[0].packet;
    485	endpoint = &target->endpoint[packet->endpoint];
    486
    487	/* walk through the scatter list and process */
    488	for (i = 0; i < scat_req->scat_entries; i++) {
    489		packet = scat_req->scat_list[i].packet;
    490		if (!packet) {
    491			WARN_ON(1);
    492			return;
    493		}
    494
    495		packet->status = scat_req->status;
    496		htc_tx_comp_update(target, endpoint, packet);
    497		list_add_tail(&packet->list, &tx_compq);
    498	}
    499
    500	/* free scatter request */
    501	hif_scatter_req_add(target->dev->ar, scat_req);
    502
    503	/* complete all packets */
    504	htc_tx_complete(endpoint, &tx_compq);
    505}
    506
    507static int ath6kl_htc_tx_issue(struct htc_target *target,
    508			       struct htc_packet *packet)
    509{
    510	int status;
    511	bool sync = false;
    512	u32 padded_len, send_len;
    513
    514	if (!packet->completion)
    515		sync = true;
    516
    517	send_len = packet->act_len + HTC_HDR_LENGTH;
    518
    519	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
    520
    521	ath6kl_dbg(ATH6KL_DBG_HTC,
    522		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
    523		   send_len, packet->info.tx.seqno, padded_len,
    524		   target->dev->ar->mbox_info.htc_addr,
    525		   sync ? "sync" : "async");
    526
    527	if (sync) {
    528		status = hif_read_write_sync(target->dev->ar,
    529				target->dev->ar->mbox_info.htc_addr,
    530				 packet->buf, padded_len,
    531				 HIF_WR_SYNC_BLOCK_INC);
    532
    533		packet->status = status;
    534		packet->buf += HTC_HDR_LENGTH;
    535	} else
    536		status = hif_write_async(target->dev->ar,
    537				target->dev->ar->mbox_info.htc_addr,
    538				packet->buf, padded_len,
    539				HIF_WR_ASYNC_BLOCK_INC, packet);
    540
    541	trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
    542
    543	return status;
    544}
    545
    546static int htc_check_credits(struct htc_target *target,
    547			     struct htc_endpoint *ep, u8 *flags,
    548			     enum htc_endpoint_id eid, unsigned int len,
    549			     int *req_cred)
    550{
    551	*req_cred = (len > target->tgt_cred_sz) ?
    552		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
    553
    554	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
    555		   *req_cred, ep->cred_dist.credits);
    556
    557	if (ep->cred_dist.credits < *req_cred) {
    558		if (eid == ENDPOINT_0)
    559			return -EINVAL;
    560
    561		/* Seek more credits */
    562		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
    563
    564		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
    565
    566		ep->cred_dist.seek_cred = 0;
    567
    568		if (ep->cred_dist.credits < *req_cred) {
    569			ath6kl_dbg(ATH6KL_DBG_CREDIT,
    570				   "credit not found for ep %d\n",
    571				   eid);
    572			return -EINVAL;
    573		}
    574	}
    575
    576	ep->cred_dist.credits -= *req_cred;
    577	ep->ep_st.cred_cosumd += *req_cred;
    578
     579	/* When we are getting low on credits, ask for more */
    580	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
    581		ep->cred_dist.seek_cred =
    582		ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
    583
    584		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
    585
    586		/* see if we were successful in getting more */
    587		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
    588			/* tell the target we need credits ASAP! */
    589			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
    590			ep->ep_st.cred_low_indicate += 1;
    591			ath6kl_dbg(ATH6KL_DBG_CREDIT,
    592				   "credit we need credits asap\n");
    593		}
    594	}
    595
    596	return 0;
    597}
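        /*
         * Credit arithmetic example (sizes assumed): with tgt_cred_sz = 256,
         * a 600-byte padded message needs DIV_ROUND_UP(600, 256) = 3
         * credits; anything that fits within one credit size needs exactly 1.
         */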
    598
    599static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
    600				   struct htc_endpoint *endpoint,
    601				   struct list_head *queue)
    602{
    603	int req_cred;
    604	u8 flags;
    605	struct htc_packet *packet;
    606	unsigned int len;
    607
    608	while (true) {
    609		flags = 0;
    610
    611		if (list_empty(&endpoint->txq))
    612			break;
    613		packet = list_first_entry(&endpoint->txq, struct htc_packet,
    614					  list);
    615
    616		ath6kl_dbg(ATH6KL_DBG_HTC,
    617			   "htc tx got packet 0x%p queue depth %d\n",
    618			   packet, get_queue_depth(&endpoint->txq));
    619
    620		len = CALC_TXRX_PADDED_LEN(target,
    621					   packet->act_len + HTC_HDR_LENGTH);
    622
    623		if (htc_check_credits(target, endpoint, &flags,
    624				      packet->endpoint, len, &req_cred))
    625			break;
    626
    627		/* now we can fully move onto caller's queue */
    628		packet = list_first_entry(&endpoint->txq, struct htc_packet,
    629					  list);
    630		list_move_tail(&packet->list, queue);
    631
    632		/* save the number of credits this packet consumed */
    633		packet->info.tx.cred_used = req_cred;
    634
    635		/* all TX packets are handled asynchronously */
    636		packet->completion = htc_tx_comp_handler;
    637		packet->context = target;
    638		endpoint->ep_st.tx_issued += 1;
    639
    640		/* save send flags */
    641		packet->info.tx.flags = flags;
    642		packet->info.tx.seqno = endpoint->seqno;
    643		endpoint->seqno++;
    644	}
    645}
    646
    647/* See if the padded tx length falls on a credit boundary */
    648static int htc_get_credit_padding(unsigned int cred_sz, int *len,
    649				  struct htc_endpoint *ep)
    650{
    651	int rem_cred, cred_pad;
    652
    653	rem_cred = *len % cred_sz;
    654
    655	/* No padding needed */
    656	if  (!rem_cred)
    657		return 0;
    658
    659	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
    660		return -1;
    661
    662	/*
    663	 * The transfer consumes a "partial" credit, this
    664	 * packet cannot be bundled unless we add
    665	 * additional "dummy" padding (max 255 bytes) to
    666	 * consume the entire credit.
    667	 */
    668	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
    669
    670	if ((cred_pad > 0) && (cred_pad <= 255))
    671		*len += cred_pad;
    672	else
    673		/* The amount of padding is too large, send as non-bundled */
    674		return -1;
    675
    676	return cred_pad;
    677}
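        /*
         * Padding example (sizes assumed): with cred_sz = 256 and
         * *len = 100, rem_cred = 100, so cred_pad = 256 - 100 = 156 and the
         * message is padded to 256 bytes, consuming exactly one credit.
         */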
    678
    679static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
    680					 struct htc_endpoint *endpoint,
    681					 struct hif_scatter_req *scat_req,
    682					 int n_scat,
    683					 struct list_head *queue)
    684{
    685	struct htc_packet *packet;
    686	int i, len, rem_scat, cred_pad;
    687	int status = 0;
    688	u8 flags;
    689
    690	rem_scat = target->max_tx_bndl_sz;
    691
    692	for (i = 0; i < n_scat; i++) {
    693		scat_req->scat_list[i].packet = NULL;
    694
    695		if (list_empty(queue))
    696			break;
    697
    698		packet = list_first_entry(queue, struct htc_packet, list);
    699		len = CALC_TXRX_PADDED_LEN(target,
    700					   packet->act_len + HTC_HDR_LENGTH);
    701
    702		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
    703						  &len, endpoint);
    704		if (cred_pad < 0 || rem_scat < len) {
    705			status = -ENOSPC;
    706			break;
    707		}
    708
    709		rem_scat -= len;
    710		/* now remove it from the queue */
    711		list_del(&packet->list);
    712
    713		scat_req->scat_list[i].packet = packet;
    714		/* prepare packet and flag message as part of a send bundle */
    715		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
    716		ath6kl_htc_tx_prep_pkt(packet, flags,
    717				       cred_pad, packet->info.tx.seqno);
    718		/* Make sure the buffer is 4-byte aligned */
    719		ath6kl_htc_tx_buf_align(&packet->buf,
    720					packet->act_len + HTC_HDR_LENGTH);
    721		scat_req->scat_list[i].buf = packet->buf;
    722		scat_req->scat_list[i].len = len;
    723
    724		scat_req->len += len;
    725		scat_req->scat_entries++;
    726		ath6kl_dbg(ATH6KL_DBG_HTC,
    727			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
    728			   i, packet, packet->info.tx.seqno, len, rem_scat);
    729	}
    730
    731	/* Roll back scatter setup in case of any failure */
    732	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
    733		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
    734			packet = scat_req->scat_list[i].packet;
    735			if (packet) {
    736				packet->buf += HTC_HDR_LENGTH;
    737				list_add(&packet->list, queue);
    738			}
    739		}
    740		return -EAGAIN;
    741	}
    742
    743	return status;
    744}
    745
     746/*
     747 * Drain a queue and send as bundles. This function may return without
     748 * fully draining the queue when:
     749 *
     750 *    1. scatter resources are exhausted
     751 *    2. a message that will consume a partial credit will stop the
     752 *    bundling process early
     753 *    3. we drop below the minimum number of messages for a bundle
     754 */
    755static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
    756				 struct list_head *queue,
    757				 int *sent_bundle, int *n_bundle_pkts)
    758{
    759	struct htc_target *target = endpoint->target;
    760	struct hif_scatter_req *scat_req = NULL;
    761	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
    762	struct htc_packet *packet;
    763	int status;
    764	u32 txb_mask;
    765	u8 ac = WMM_NUM_AC;
    766
    767	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
    768	    (WMI_CONTROL_SVC != endpoint->svc_id))
    769		ac = target->dev->ar->ep2ac_map[endpoint->eid];
    770
    771	while (true) {
    772		status = 0;
    773		n_scat = get_queue_depth(queue);
    774		n_scat = min(n_scat, target->msg_per_bndl_max);
    775
    776		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
    777			/* not enough to bundle */
    778			break;
    779
    780		scat_req = hif_scatter_req_get(target->dev->ar);
    781
    782		if (!scat_req) {
    783			/* no scatter resources  */
    784			ath6kl_dbg(ATH6KL_DBG_HTC,
    785				   "htc tx no more scatter resources\n");
    786			break;
    787		}
    788
    789		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
    790			if (WMM_AC_BE == ac)
    791				/*
    792				 * BE, BK have priorities and bit
    793				 * positions reversed
    794				 */
    795				txb_mask = (1 << WMM_AC_BK);
    796			else
    797				/*
    798				 * any AC with priority lower than
    799				 * itself
    800				 */
    801				txb_mask = ((1 << ac) - 1);
    802
     803			/*
     804			 * When the scatter request resources drop below a
     805			 * certain threshold, disable Tx bundling for all
     806			 * ACs with priority lower than the current requesting
     807			 * AC. Otherwise re-enable Tx bundling for them.
     808			 */
    809			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
    810				target->tx_bndl_mask &= ~txb_mask;
    811			else
    812				target->tx_bndl_mask |= txb_mask;
    813		}
    814
    815		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
    816			   n_scat);
    817
    818		scat_req->len = 0;
    819		scat_req->scat_entries = 0;
    820
    821		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
    822						       scat_req, n_scat,
    823						       queue);
    824		if (status == -EAGAIN) {
    825			hif_scatter_req_add(target->dev->ar, scat_req);
    826			break;
    827		}
    828
    829		/* send path is always asynchronous */
    830		scat_req->complete = htc_async_tx_scat_complete;
    831		n_sent_bundle++;
    832		tot_pkts_bundle += scat_req->scat_entries;
    833
    834		ath6kl_dbg(ATH6KL_DBG_HTC,
    835			   "htc tx scatter bytes %d entries %d\n",
    836			   scat_req->len, scat_req->scat_entries);
    837
    838		for (i = 0; i < scat_req->scat_entries; i++) {
    839			packet = scat_req->scat_list[i].packet;
    840			trace_ath6kl_htc_tx(packet->status, packet->endpoint,
    841					    packet->buf, packet->act_len);
    842		}
    843
    844		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
    845
    846		if (status)
    847			break;
    848	}
    849
    850	*sent_bundle = n_sent_bundle;
    851	*n_bundle_pkts = tot_pkts_bundle;
    852	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
    853		   n_sent_bundle);
    854
    855	return;
    856}
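        /*
         * txb_mask illustration, assuming the usual ath6kl AC numbering
         * (BE = 0, BK = 1, VI = 2, VO = 3): a VI request builds
         * txb_mask = (1 << 2) - 1 = 0x3, i.e. the BE and BK bits, while a
         * BE request uses (1 << WMM_AC_BK) because BE and BK have their
         * priority order reversed relative to their bit positions.
         */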
    857
    858static void ath6kl_htc_tx_from_queue(struct htc_target *target,
    859				     struct htc_endpoint *endpoint)
    860{
    861	struct list_head txq;
    862	struct htc_packet *packet;
    863	int bundle_sent;
    864	int n_pkts_bundle;
    865	u8 ac = WMM_NUM_AC;
    866	int status;
    867
    868	spin_lock_bh(&target->tx_lock);
    869
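	/*
	 * tx_proc_cnt serializes queue draining: if another context is
	 * already servicing this endpoint's TX queue, bail out and let
	 * that context pick up the packets queued meanwhile.
	 */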
    870	endpoint->tx_proc_cnt++;
    871	if (endpoint->tx_proc_cnt > 1) {
    872		endpoint->tx_proc_cnt--;
    873		spin_unlock_bh(&target->tx_lock);
    874		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
    875		return;
    876	}
    877
    878	/*
    879	 * drain the endpoint TX queue for transmission as long
    880	 * as we have enough credits.
    881	 */
    882	INIT_LIST_HEAD(&txq);
    883
    884	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
    885	    (WMI_CONTROL_SVC != endpoint->svc_id))
    886		ac = target->dev->ar->ep2ac_map[endpoint->eid];
    887
    888	while (true) {
    889		if (list_empty(&endpoint->txq))
    890			break;
    891
    892		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
    893
    894		if (list_empty(&txq))
    895			break;
    896
    897		spin_unlock_bh(&target->tx_lock);
    898
    899		bundle_sent = 0;
    900		n_pkts_bundle = 0;
    901
    902		while (true) {
    903			/* try to send a bundle on each pass */
    904			if ((target->tx_bndl_mask) &&
    905			    (get_queue_depth(&txq) >=
    906			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
    907				int temp1 = 0, temp2 = 0;
    908
    909				/* check if bundling is enabled for an AC */
    910				if (target->tx_bndl_mask & (1 << ac)) {
    911					ath6kl_htc_tx_bundle(endpoint, &txq,
    912							     &temp1, &temp2);
    913					bundle_sent += temp1;
    914					n_pkts_bundle += temp2;
    915				}
    916			}
    917
    918			if (list_empty(&txq))
    919				break;
    920
    921			packet = list_first_entry(&txq, struct htc_packet,
    922						  list);
    923			list_del(&packet->list);
    924
    925			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
    926					       0, packet->info.tx.seqno);
    927			status = ath6kl_htc_tx_issue(target, packet);
    928
    929			if (status) {
    930				packet->status = status;
    931				packet->completion(packet->context, packet);
    932			}
    933		}
    934
    935		spin_lock_bh(&target->tx_lock);
    936
    937		endpoint->ep_st.tx_bundles += bundle_sent;
    938		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
    939
     940		/*
     941		 * If an AC has bundling disabled and no tx bundling
     942		 * has occurred continuously for a certain number of TX
     943		 * passes, re-enable tx bundling for this AC.
     944		 */
    945		if (!bundle_sent) {
    946			if (!(target->tx_bndl_mask & (1 << ac)) &&
    947			    (ac < WMM_NUM_AC)) {
    948				if (++target->ac_tx_count[ac] >=
    949					TX_RESUME_BUNDLE_THRESHOLD) {
    950					target->ac_tx_count[ac] = 0;
    951					target->tx_bndl_mask |= (1 << ac);
    952				}
    953			}
    954		} else {
    955			/* tx bundling will reset the counter */
    956			if (ac < WMM_NUM_AC)
    957				target->ac_tx_count[ac] = 0;
    958		}
    959	}
    960
    961	endpoint->tx_proc_cnt = 0;
    962	spin_unlock_bh(&target->tx_lock);
    963}
    964
    965static bool ath6kl_htc_tx_try(struct htc_target *target,
    966			      struct htc_endpoint *endpoint,
    967			      struct htc_packet *tx_pkt)
    968{
    969	struct htc_ep_callbacks ep_cb;
    970	int txq_depth;
    971	bool overflow = false;
    972
    973	ep_cb = endpoint->ep_cb;
    974
    975	spin_lock_bh(&target->tx_lock);
    976	txq_depth = get_queue_depth(&endpoint->txq);
    977	spin_unlock_bh(&target->tx_lock);
    978
    979	if (txq_depth >= endpoint->max_txq_depth)
    980		overflow = true;
    981
    982	if (overflow)
    983		ath6kl_dbg(ATH6KL_DBG_HTC,
    984			   "htc tx overflow ep %d depth %d max %d\n",
    985			   endpoint->eid, txq_depth,
    986			   endpoint->max_txq_depth);
    987
    988	if (overflow && ep_cb.tx_full) {
    989		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
    990		    HTC_SEND_FULL_DROP) {
    991			endpoint->ep_st.tx_dropped += 1;
    992			return false;
    993		}
    994	}
    995
    996	spin_lock_bh(&target->tx_lock);
    997	list_add_tail(&tx_pkt->list, &endpoint->txq);
    998	spin_unlock_bh(&target->tx_lock);
    999
   1000	ath6kl_htc_tx_from_queue(target, endpoint);
   1001
   1002	return true;
   1003}
   1004
   1005static void htc_chk_ep_txq(struct htc_target *target)
   1006{
   1007	struct htc_endpoint *endpoint;
   1008	struct htc_endpoint_credit_dist *cred_dist;
   1009
   1010	/*
   1011	 * Run through the credit distribution list to see if there are
   1012	 * packets queued. NOTE: no locks need to be taken since the
   1013	 * distribution list is not dynamic (cannot be re-ordered) and we
   1014	 * are not modifying any state.
   1015	 */
   1016	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
   1017		endpoint = cred_dist->htc_ep;
   1018
   1019		spin_lock_bh(&target->tx_lock);
   1020		if (!list_empty(&endpoint->txq)) {
   1021			ath6kl_dbg(ATH6KL_DBG_HTC,
   1022				   "htc creds ep %d credits %d pkts %d\n",
   1023				   cred_dist->endpoint,
   1024				   endpoint->cred_dist.credits,
   1025				   get_queue_depth(&endpoint->txq));
   1026			spin_unlock_bh(&target->tx_lock);
   1027			/*
   1028			 * Try to start the stalled queue, this list is
   1029			 * ordered by priority. If there are credits
   1030			 * available the highest priority queue will get a
   1031			 * chance to reclaim credits from lower priority
   1032			 * ones.
   1033			 */
   1034			ath6kl_htc_tx_from_queue(target, endpoint);
   1035			spin_lock_bh(&target->tx_lock);
   1036		}
   1037		spin_unlock_bh(&target->tx_lock);
   1038	}
   1039}
   1040
   1041static int htc_setup_tx_complete(struct htc_target *target)
   1042{
   1043	struct htc_packet *send_pkt = NULL;
   1044	int status;
   1045
   1046	send_pkt = htc_get_control_buf(target, true);
   1047
   1048	if (!send_pkt)
   1049		return -ENOMEM;
   1050
   1051	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
   1052		struct htc_setup_comp_ext_msg *setup_comp_ext;
   1053		u32 flags = 0;
   1054
   1055		setup_comp_ext =
   1056		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
   1057		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
   1058		setup_comp_ext->msg_id =
   1059			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
   1060
   1061		if (target->msg_per_bndl_max > 0) {
   1062			/* Indicate HTC bundling to the target */
   1063			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
   1064			setup_comp_ext->msg_per_rxbndl =
   1065						target->msg_per_bndl_max;
   1066		}
   1067
   1068		memcpy(&setup_comp_ext->flags, &flags,
   1069		       sizeof(setup_comp_ext->flags));
   1070		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
   1071				 sizeof(struct htc_setup_comp_ext_msg),
   1072				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
   1073
   1074	} else {
   1075		struct htc_setup_comp_msg *setup_comp;
   1076		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
   1077		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
   1078		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
   1079		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
   1080				 sizeof(struct htc_setup_comp_msg),
   1081				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
   1082	}
   1083
   1084	/* we want synchronous operation */
   1085	send_pkt->completion = NULL;
   1086	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
   1087	status = ath6kl_htc_tx_issue(target, send_pkt);
   1088	htc_reclaim_txctrl_buf(target, send_pkt);
   1089
   1090	return status;
   1091}
   1092
   1093static void ath6kl_htc_set_credit_dist(struct htc_target *target,
   1094				struct ath6kl_htc_credit_info *credit_info,
   1095				u16 srvc_pri_order[], int list_len)
   1096{
   1097	struct htc_endpoint *endpoint;
   1098	int i, ep;
   1099
   1100	target->credit_info = credit_info;
   1101
   1102	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
   1103		      &target->cred_dist_list);
   1104
   1105	for (i = 0; i < list_len; i++) {
   1106		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
   1107			endpoint = &target->endpoint[ep];
   1108			if (endpoint->svc_id == srvc_pri_order[i]) {
   1109				list_add_tail(&endpoint->cred_dist.list,
   1110					      &target->cred_dist_list);
   1111				break;
   1112			}
   1113		}
   1114		if (ep >= ENDPOINT_MAX) {
   1115			WARN_ON(1);
   1116			return;
   1117		}
   1118	}
   1119}
   1120
   1121static int ath6kl_htc_mbox_tx(struct htc_target *target,
   1122			      struct htc_packet *packet)
   1123{
   1124	struct htc_endpoint *endpoint;
   1125	struct list_head queue;
   1126
   1127	ath6kl_dbg(ATH6KL_DBG_HTC,
   1128		   "htc tx ep id %d buf 0x%p len %d\n",
   1129		   packet->endpoint, packet->buf, packet->act_len);
   1130
   1131	if (packet->endpoint >= ENDPOINT_MAX) {
   1132		WARN_ON(1);
   1133		return -EINVAL;
   1134	}
   1135
   1136	endpoint = &target->endpoint[packet->endpoint];
   1137
   1138	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
   1139		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
   1140				 -ECANCELED : -ENOSPC;
   1141		INIT_LIST_HEAD(&queue);
   1142		list_add(&packet->list, &queue);
   1143		htc_tx_complete(endpoint, &queue);
   1144	}
   1145
   1146	return 0;
   1147}
   1148
   1149/* flush endpoint TX queue */
   1150static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
   1151			   enum htc_endpoint_id eid, u16 tag)
   1152{
   1153	struct htc_packet *packet, *tmp_pkt;
   1154	struct list_head discard_q, container;
   1155	struct htc_endpoint *endpoint = &target->endpoint[eid];
   1156
   1157	if (!endpoint->svc_id) {
   1158		WARN_ON(1);
   1159		return;
   1160	}
   1161
   1162	/* initialize the discard queue */
   1163	INIT_LIST_HEAD(&discard_q);
   1164
   1165	spin_lock_bh(&target->tx_lock);
   1166
   1167	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
   1168		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
   1169		    (tag == packet->info.tx.tag))
   1170			list_move_tail(&packet->list, &discard_q);
   1171	}
   1172
   1173	spin_unlock_bh(&target->tx_lock);
   1174
   1175	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
   1176		packet->status = -ECANCELED;
   1177		list_del(&packet->list);
   1178		ath6kl_dbg(ATH6KL_DBG_HTC,
   1179			   "htc tx flushing pkt 0x%p len %d  ep %d tag 0x%x\n",
   1180			   packet, packet->act_len,
   1181			   packet->endpoint, packet->info.tx.tag);
   1182
   1183		INIT_LIST_HEAD(&container);
   1184		list_add_tail(&packet->list, &container);
   1185		htc_tx_complete(endpoint, &container);
   1186	}
   1187}
   1188
   1189static void ath6kl_htc_flush_txep_all(struct htc_target *target)
   1190{
   1191	struct htc_endpoint *endpoint;
   1192	int i;
   1193
   1194	dump_cred_dist_stats(target);
   1195
   1196	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
   1197		endpoint = &target->endpoint[i];
   1198		if (endpoint->svc_id == 0)
   1199			/* not in use.. */
   1200			continue;
   1201		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
   1202	}
   1203}
   1204
   1205static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
   1206					     enum htc_endpoint_id eid,
   1207					     bool active)
   1208{
   1209	struct htc_endpoint *endpoint = &target->endpoint[eid];
   1210	bool dist = false;
   1211
   1212	if (endpoint->svc_id == 0) {
   1213		WARN_ON(1);
   1214		return;
   1215	}
   1216
   1217	spin_lock_bh(&target->tx_lock);
   1218
   1219	if (active) {
   1220		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
   1221			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
   1222			dist = true;
   1223		}
   1224	} else {
   1225		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
   1226			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
   1227			dist = true;
   1228		}
   1229	}
   1230
   1231	if (dist) {
   1232		endpoint->cred_dist.txq_depth =
   1233			get_queue_depth(&endpoint->txq);
   1234
   1235		ath6kl_dbg(ATH6KL_DBG_HTC,
   1236			   "htc tx activity ctxt 0x%p dist 0x%p\n",
   1237			   target->credit_info, &target->cred_dist_list);
   1238
   1239		ath6kl_credit_distribute(target->credit_info,
   1240					 &target->cred_dist_list,
   1241					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
   1242	}
   1243
   1244	spin_unlock_bh(&target->tx_lock);
   1245
   1246	if (dist && !active)
   1247		htc_chk_ep_txq(target);
   1248}
   1249
   1250/* HTC Rx */
   1251
   1252static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
   1253					      int n_look_ahds)
   1254{
   1255	endpoint->ep_st.rx_pkts++;
   1256	if (n_look_ahds == 1)
   1257		endpoint->ep_st.rx_lkahds++;
   1258	else if (n_look_ahds > 1)
   1259		endpoint->ep_st.rx_bundle_lkahd++;
   1260}
   1261
   1262static inline bool htc_valid_rx_frame_len(struct htc_target *target,
   1263					  enum htc_endpoint_id eid, int len)
   1264{
   1265	return (eid == target->dev->ar->ctrl_ep) ?
   1266		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
   1267}
   1268
   1269static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
   1270{
   1271	struct list_head queue;
   1272
   1273	INIT_LIST_HEAD(&queue);
   1274	list_add_tail(&packet->list, &queue);
   1275	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
   1276}
   1277
   1278static void htc_reclaim_rxbuf(struct htc_target *target,
   1279			      struct htc_packet *packet,
   1280			      struct htc_endpoint *ep)
   1281{
   1282	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
   1283		htc_rxpkt_reset(packet);
   1284		packet->status = -ECANCELED;
   1285		ep->ep_cb.rx(ep->target, packet);
   1286	} else {
   1287		htc_rxpkt_reset(packet);
   1288		htc_add_rxbuf((void *)(target), packet);
   1289	}
   1290}
   1291
   1292static void reclaim_rx_ctrl_buf(struct htc_target *target,
   1293				struct htc_packet *packet)
   1294{
   1295	spin_lock_bh(&target->htc_lock);
   1296	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
   1297	spin_unlock_bh(&target->htc_lock);
   1298}
   1299
   1300static int ath6kl_htc_rx_packet(struct htc_target *target,
   1301				struct htc_packet *packet,
   1302				u32 rx_len)
   1303{
   1304	struct ath6kl_device *dev = target->dev;
   1305	u32 padded_len;
   1306	int status;
   1307
   1308	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
   1309
   1310	if (padded_len > packet->buf_len) {
   1311		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
   1312			   padded_len, rx_len, packet->buf_len);
   1313		return -ENOMEM;
   1314	}
   1315
   1316	ath6kl_dbg(ATH6KL_DBG_HTC,
   1317		   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
   1318		   packet, packet->info.rx.exp_hdr,
   1319		   padded_len, dev->ar->mbox_info.htc_addr);
   1320
   1321	status = hif_read_write_sync(dev->ar,
   1322				     dev->ar->mbox_info.htc_addr,
   1323				     packet->buf, padded_len,
   1324				     HIF_RD_SYNC_BLOCK_FIX);
   1325
   1326	packet->status = status;
   1327
   1328	return status;
   1329}
   1330
    1331/*
    1332 * Optimization for recv packets: we can indicate a
    1333 * "hint" that there are more single packets to fetch
    1334 * on this endpoint.
    1335 */
   1336static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
   1337				       struct htc_endpoint *endpoint,
   1338				       struct htc_packet *packet)
   1339{
   1340	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
   1341
   1342	if (htc_hdr->eid == packet->endpoint) {
   1343		if (!list_empty(&endpoint->rx_bufq))
   1344			packet->info.rx.indicat_flags |=
   1345					HTC_RX_FLAGS_INDICATE_MORE_PKTS;
   1346	}
   1347}
   1348
   1349static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
   1350{
   1351	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
   1352
   1353	if (ep_cb.rx_refill_thresh > 0) {
   1354		spin_lock_bh(&endpoint->target->rx_lock);
   1355		if (get_queue_depth(&endpoint->rx_bufq)
   1356		    < ep_cb.rx_refill_thresh) {
   1357			spin_unlock_bh(&endpoint->target->rx_lock);
   1358			ep_cb.rx_refill(endpoint->target, endpoint->eid);
   1359			return;
   1360		}
   1361		spin_unlock_bh(&endpoint->target->rx_lock);
   1362	}
   1363}
   1364
   1365/* This function is called with rx_lock held */
   1366static int ath6kl_htc_rx_setup(struct htc_target *target,
   1367			       struct htc_endpoint *ep,
   1368			       u32 *lk_ahds, struct list_head *queue, int n_msg)
   1369{
   1370	struct htc_packet *packet;
   1371	/* FIXME: type of lk_ahds can't be right */
   1372	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
   1373	struct htc_ep_callbacks ep_cb;
   1374	int status = 0, j, full_len;
   1375	bool no_recycle;
   1376
   1377	full_len = CALC_TXRX_PADDED_LEN(target,
   1378					le16_to_cpu(htc_hdr->payld_len) +
   1379					sizeof(*htc_hdr));
   1380
   1381	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
   1382		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
   1383			    htc_hdr->eid, htc_hdr->flags,
   1384			    le16_to_cpu(htc_hdr->payld_len));
   1385		return -EINVAL;
   1386	}
   1387
   1388	ep_cb = ep->ep_cb;
   1389	for (j = 0; j < n_msg; j++) {
    1390		/*
    1391		 * Reset the flag; any packets allocated using the
    1392		 * rx_alloc() API cannot be recycled on cleanup,
    1393		 * they must be explicitly returned.
    1394		 */
   1395		no_recycle = false;
   1396
   1397		if (ep_cb.rx_allocthresh &&
   1398		    (full_len > ep_cb.rx_alloc_thresh)) {
   1399			ep->ep_st.rx_alloc_thresh_hit += 1;
   1400			ep->ep_st.rxalloc_thresh_byte +=
   1401				le16_to_cpu(htc_hdr->payld_len);
   1402
   1403			spin_unlock_bh(&target->rx_lock);
   1404			no_recycle = true;
   1405
   1406			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
   1407						      full_len);
   1408			spin_lock_bh(&target->rx_lock);
   1409		} else {
   1410			/* refill handler is being used */
   1411			if (list_empty(&ep->rx_bufq)) {
   1412				if (ep_cb.rx_refill) {
   1413					spin_unlock_bh(&target->rx_lock);
   1414					ep_cb.rx_refill(ep->target, ep->eid);
   1415					spin_lock_bh(&target->rx_lock);
   1416				}
   1417			}
   1418
   1419			if (list_empty(&ep->rx_bufq)) {
   1420				packet = NULL;
   1421			} else {
   1422				packet = list_first_entry(&ep->rx_bufq,
   1423						struct htc_packet, list);
   1424				list_del(&packet->list);
   1425			}
   1426		}
   1427
   1428		if (!packet) {
   1429			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
   1430			target->ep_waiting = ep->eid;
   1431			return -ENOSPC;
   1432		}
   1433
   1434		/* clear flags */
   1435		packet->info.rx.rx_flags = 0;
   1436		packet->info.rx.indicat_flags = 0;
   1437		packet->status = 0;
   1438
   1439		if (no_recycle)
    1440			/*
    1441			 * Flag that these packets cannot be
    1442			 * recycled; they have to be returned to
    1443			 * the user.
    1444			 */
   1445			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
   1446
   1447		/* Caller needs to free this upon any failure */
   1448		list_add_tail(&packet->list, queue);
   1449
   1450		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   1451			status = -ECANCELED;
   1452			break;
   1453		}
   1454
   1455		if (j) {
   1456			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
   1457			packet->info.rx.exp_hdr = 0xFFFFFFFF;
   1458		} else
   1459			/* set expected look ahead */
   1460			packet->info.rx.exp_hdr = *lk_ahds;
   1461
   1462		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
   1463			HTC_HDR_LENGTH;
   1464	}
   1465
   1466	return status;
   1467}
   1468
   1469static int ath6kl_htc_rx_alloc(struct htc_target *target,
   1470			       u32 lk_ahds[], int msg,
   1471			       struct htc_endpoint *endpoint,
   1472			       struct list_head *queue)
   1473{
   1474	int status = 0;
   1475	struct htc_packet *packet, *tmp_pkt;
   1476	struct htc_frame_hdr *htc_hdr;
   1477	int i, n_msg;
   1478
   1479	spin_lock_bh(&target->rx_lock);
   1480
   1481	for (i = 0; i < msg; i++) {
   1482		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
   1483
   1484		if (htc_hdr->eid >= ENDPOINT_MAX) {
   1485			ath6kl_err("invalid ep in look-ahead: %d\n",
   1486				   htc_hdr->eid);
   1487			status = -ENOMEM;
   1488			break;
   1489		}
   1490
   1491		if (htc_hdr->eid != endpoint->eid) {
   1492			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
   1493				   htc_hdr->eid, endpoint->eid, i);
   1494			status = -ENOMEM;
   1495			break;
   1496		}
   1497
   1498		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
   1499			ath6kl_err("payload len %d exceeds max htc : %d !\n",
   1500				   htc_hdr->payld_len,
   1501				   (u32) HTC_MAX_PAYLOAD_LENGTH);
   1502			status = -ENOMEM;
   1503			break;
   1504		}
   1505
   1506		if (endpoint->svc_id == 0) {
   1507			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
   1508			status = -ENOMEM;
   1509			break;
   1510		}
   1511
   1512		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
   1513			/*
   1514			 * HTC header indicates that every packet to follow
   1515			 * has the same padded length so that it can be
   1516			 * optimally fetched as a full bundle.
   1517			 */
   1518			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
   1519				HTC_FLG_RX_BNDL_CNT_S;
   1520
   1521			/* the count doesn't include the starter frame */
   1522			n_msg++;
   1523			if (n_msg > target->msg_per_bndl_max) {
   1524				status = -ENOMEM;
   1525				break;
   1526			}
   1527
   1528			endpoint->ep_st.rx_bundle_from_hdr += 1;
   1529			ath6kl_dbg(ATH6KL_DBG_HTC,
   1530				   "htc rx bundle pkts %d\n",
   1531				   n_msg);
   1532		} else
   1533			/* HTC header only indicates 1 message to fetch */
   1534			n_msg = 1;
   1535
   1536		/* Setup packet buffers for each message */
   1537		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
   1538					     queue, n_msg);
   1539
    1540		/*
    1541		 * This happens when there are not enough buffers to rx all
    1542		 * the data. Return no error so that free buffers from the
    1543		 * queue can be used to receive partial data.
    1544		 */
   1545		if (status == -ENOSPC) {
   1546			spin_unlock_bh(&target->rx_lock);
   1547			return 0;
   1548		}
   1549
   1550		if (status)
   1551			break;
   1552	}
   1553
   1554	spin_unlock_bh(&target->rx_lock);
   1555
   1556	if (status) {
   1557		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
   1558			list_del(&packet->list);
   1559			htc_reclaim_rxbuf(target, packet,
   1560					  &target->endpoint[packet->endpoint]);
   1561		}
   1562	}
   1563
   1564	return status;
   1565}
   1566
   1567static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
   1568{
   1569	if (packets->endpoint != ENDPOINT_0) {
   1570		WARN_ON(1);
   1571		return;
   1572	}
   1573
   1574	if (packets->status == -ECANCELED) {
   1575		reclaim_rx_ctrl_buf(context, packets);
   1576		return;
   1577	}
   1578
   1579	if (packets->act_len > 0) {
   1580		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
   1581			   packets->act_len + HTC_HDR_LENGTH);
   1582
   1583		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
   1584				"htc rx unexpected endpoint 0 message", "",
   1585				packets->buf - HTC_HDR_LENGTH,
   1586				packets->act_len + HTC_HDR_LENGTH);
   1587	}
   1588
   1589	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
   1590}
   1591
   1592static void htc_proc_cred_rpt(struct htc_target *target,
   1593			      struct htc_credit_report *rpt,
   1594			      int n_entries,
   1595			      enum htc_endpoint_id from_ep)
   1596{
   1597	struct htc_endpoint *endpoint;
   1598	int tot_credits = 0, i;
   1599	bool dist = false;
   1600
   1601	spin_lock_bh(&target->tx_lock);
   1602
   1603	for (i = 0; i < n_entries; i++, rpt++) {
   1604		if (rpt->eid >= ENDPOINT_MAX) {
   1605			WARN_ON(1);
   1606			spin_unlock_bh(&target->tx_lock);
   1607			return;
   1608		}
   1609
   1610		endpoint = &target->endpoint[rpt->eid];
   1611
   1612		ath6kl_dbg(ATH6KL_DBG_CREDIT,
   1613			   "credit report ep %d credits %d\n",
   1614			   rpt->eid, rpt->credits);
   1615
   1616		endpoint->ep_st.tx_cred_rpt += 1;
   1617		endpoint->ep_st.cred_retnd += rpt->credits;
   1618
   1619		if (from_ep == rpt->eid) {
   1620			/*
   1621			 * This credit report arrived on the same endpoint
   1622			 * indicating it arrived in an RX packet.
   1623			 */
   1624			endpoint->ep_st.cred_from_rx += rpt->credits;
   1625			endpoint->ep_st.cred_rpt_from_rx += 1;
   1626		} else if (from_ep == ENDPOINT_0) {
   1627			/* credit arrived on endpoint 0 as a NULL message */
   1628			endpoint->ep_st.cred_from_ep0 += rpt->credits;
   1629			endpoint->ep_st.cred_rpt_ep0 += 1;
   1630		} else {
   1631			endpoint->ep_st.cred_from_other += rpt->credits;
   1632			endpoint->ep_st.cred_rpt_from_other += 1;
   1633		}
   1634
   1635		if (rpt->eid == ENDPOINT_0)
   1636			/* always give endpoint 0 credits back */
   1637			endpoint->cred_dist.credits += rpt->credits;
   1638		else {
   1639			endpoint->cred_dist.cred_to_dist += rpt->credits;
   1640			dist = true;
   1641		}
   1642
    1643		/*
    1644		 * Refresh the tx depth for the distribution function that
    1645		 * will recover these credits. NOTE: this is only valid when
    1646		 * there are credits to recover!
    1647		 */
   1648		endpoint->cred_dist.txq_depth =
   1649			get_queue_depth(&endpoint->txq);
   1650
   1651		tot_credits += rpt->credits;
   1652	}
   1653
   1654	if (dist) {
    1655		/*
    1656		 * This was a credit return based on a completed send
    1657		 * operation. Note: this is done with the lock held.
    1658		 */
   1659		ath6kl_credit_distribute(target->credit_info,
   1660					 &target->cred_dist_list,
   1661					 HTC_CREDIT_DIST_SEND_COMPLETE);
   1662	}
   1663
   1664	spin_unlock_bh(&target->tx_lock);
   1665
   1666	if (tot_credits)
   1667		htc_chk_ep_txq(target);
   1668}
   1669
   1670static int htc_parse_trailer(struct htc_target *target,
   1671			     struct htc_record_hdr *record,
   1672			     u8 *record_buf, u32 *next_lk_ahds,
   1673			     enum htc_endpoint_id endpoint,
   1674			     int *n_lk_ahds)
   1675{
   1676	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
   1677	struct htc_lookahead_report *lk_ahd;
   1678	int len;
   1679
   1680	switch (record->rec_id) {
   1681	case HTC_RECORD_CREDITS:
   1682		len = record->len / sizeof(struct htc_credit_report);
   1683		if (!len) {
   1684			WARN_ON(1);
   1685			return -EINVAL;
   1686		}
   1687
   1688		htc_proc_cred_rpt(target,
   1689				  (struct htc_credit_report *) record_buf,
   1690				  len, endpoint);
   1691		break;
   1692	case HTC_RECORD_LOOKAHEAD:
   1693		len = record->len / sizeof(*lk_ahd);
   1694		if (!len) {
   1695			WARN_ON(1);
   1696			return -EINVAL;
   1697		}
   1698
   1699		lk_ahd = (struct htc_lookahead_report *) record_buf;
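		/*
		 * The report is trusted only when pre_valid is the byte-wise
		 * complement of post_valid, e.g. (values assumed) pre_valid
		 * 0xA5 paired with post_valid 0x5A.
		 */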
   1700		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
   1701		    next_lk_ahds) {
   1702			ath6kl_dbg(ATH6KL_DBG_HTC,
   1703				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
   1704				   lk_ahd->pre_valid, lk_ahd->post_valid);
   1705
   1706			/* look ahead bytes are valid, copy them over */
   1707			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
   1708
   1709			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
   1710					"htc rx next look ahead",
   1711					"", next_lk_ahds, 4);
   1712
   1713			*n_lk_ahds = 1;
   1714		}
   1715		break;
   1716	case HTC_RECORD_LOOKAHEAD_BUNDLE:
   1717		len = record->len / sizeof(*bundle_lkahd_rpt);
   1718		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
   1719			WARN_ON(1);
   1720			return -EINVAL;
   1721		}
   1722
   1723		if (next_lk_ahds) {
   1724			int i;
   1725
   1726			bundle_lkahd_rpt =
   1727				(struct htc_bundle_lkahd_rpt *) record_buf;
   1728
   1729			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
   1730					"", record_buf, record->len);
   1731
   1732			for (i = 0; i < len; i++) {
   1733				memcpy((u8 *)&next_lk_ahds[i],
   1734				       bundle_lkahd_rpt->lk_ahd, 4);
   1735				bundle_lkahd_rpt++;
   1736			}
   1737
   1738			*n_lk_ahds = i;
   1739		}
   1740		break;
   1741	default:
   1742		ath6kl_err("unhandled record: id:%d len:%d\n",
   1743			   record->rec_id, record->len);
   1744		break;
   1745	}
   1746
   1747	return 0;
   1748}
   1749
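        /*
         * Walk the trailer buffer record by record: peel off one
         * htc_record_hdr, check that record->len still fits in the bytes
         * remaining, hand the record payload to htc_parse_trailer() and
         * advance past it. A malformed record aborts the walk and the
         * whole trailer is dumped for debugging.
         */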
   1750static int htc_proc_trailer(struct htc_target *target,
   1751			    u8 *buf, int len, u32 *next_lk_ahds,
   1752			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
   1753{
   1754	struct htc_record_hdr *record;
   1755	int orig_len;
   1756	int status;
   1757	u8 *record_buf;
   1758	u8 *orig_buf;
   1759
   1760	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
   1761	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
   1762
   1763	orig_buf = buf;
   1764	orig_len = len;
   1765	status = 0;
   1766
   1767	while (len > 0) {
   1768		if (len < sizeof(struct htc_record_hdr)) {
   1769			status = -ENOMEM;
   1770			break;
   1771		}
   1772		/* these are byte aligned structs */
   1773		record = (struct htc_record_hdr *) buf;
   1774		len -= sizeof(struct htc_record_hdr);
   1775		buf += sizeof(struct htc_record_hdr);
   1776
   1777		if (record->len > len) {
   1778			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
   1779				   record->len, record->rec_id, len);
   1780			status = -ENOMEM;
   1781			break;
   1782		}
   1783		record_buf = buf;
   1784
   1785		status = htc_parse_trailer(target, record, record_buf,
   1786					   next_lk_ahds, endpoint, n_lk_ahds);
   1787
   1788		if (status)
   1789			break;
   1790
   1791		/* advance buffer past this record for next time around */
   1792		buf += record->len;
   1793		len -= record->len;
   1794	}
   1795
   1796	if (status)
   1797		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
   1798				"", orig_buf, orig_len);
   1799
   1800	return status;
   1801}
   1802
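        /*
         * Validate one received frame against the lookahead that announced
         * it: packets fetched blindly as part of a bundle first get their
         * expected header and actual length refreshed from the buffer, the
         * first four bytes must then match the expected lookahead, and any
         * trailer (length in htc_hdr->ctrl[0]) is processed and stripped
         * before the payload is exposed to the caller.
         */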
   1803static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
   1804				     struct htc_packet *packet,
   1805				     u32 *next_lkahds, int *n_lkahds)
   1806{
   1807	int status = 0;
   1808	u16 payload_len;
   1809	u32 lk_ahd;
   1810	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
   1811
   1812	if (n_lkahds != NULL)
   1813		*n_lkahds = 0;
   1814
   1815	/*
   1816	 * NOTE: we cannot assume the alignment of buf, so we use the safe
   1817	 * macros to retrieve 16 bit fields.
   1818	 */
   1819	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
   1820
   1821	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
   1822
   1823	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
   1824		/*
   1825		 * Refresh the expected header and the actual length as it
   1826		 * was unknown when this packet was grabbed as part of the
   1827		 * bundle.
   1828		 */
   1829		packet->info.rx.exp_hdr = lk_ahd;
   1830		packet->act_len = payload_len + HTC_HDR_LENGTH;
   1831
   1832		/* validate the actual header that was refreshed  */
   1833		if (packet->act_len > packet->buf_len) {
   1834			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
   1835				   payload_len, lk_ahd);
   1836			/*
   1837			 * Limit this to max buffer just to print out some
   1838			 * of the buffer.
   1839			 */
   1840			packet->act_len = min(packet->act_len, packet->buf_len);
   1841			status = -ENOMEM;
   1842			goto fail_rx;
   1843		}
   1844
   1845		if (packet->endpoint != htc_hdr->eid) {
   1846			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
   1847				   htc_hdr->eid, packet->endpoint);
   1848			status = -ENOMEM;
   1849			goto fail_rx;
   1850		}
   1851	}
   1852
   1853	if (lk_ahd != packet->info.rx.exp_hdr) {
   1854		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
   1855			   __func__, packet, packet->info.rx.rx_flags);
   1856		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
   1857				"", &packet->info.rx.exp_hdr, 4);
   1858		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
   1859				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
   1860		status = -ENOMEM;
   1861		goto fail_rx;
   1862	}
   1863
   1864	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
   1865		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
   1866		    htc_hdr->ctrl[0] > payload_len) {
   1867			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
   1868				   __func__, payload_len, htc_hdr->ctrl[0]);
   1869			status = -ENOMEM;
   1870			goto fail_rx;
   1871		}
   1872
   1873		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
   1874			next_lkahds = NULL;
   1875			n_lkahds = NULL;
   1876		}
   1877
   1878		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
   1879					  + payload_len - htc_hdr->ctrl[0],
   1880					  htc_hdr->ctrl[0], next_lkahds,
    1881					  n_lkahds, packet->endpoint);
   1882
   1883		if (status)
   1884			goto fail_rx;
   1885
   1886		packet->act_len -= htc_hdr->ctrl[0];
   1887	}
   1888
   1889	packet->buf += HTC_HDR_LENGTH;
   1890	packet->act_len -= HTC_HDR_LENGTH;
   1891
   1892fail_rx:
   1893	if (status)
   1894		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
   1895				"", packet->buf, packet->act_len);
   1896
   1897	return status;
   1898}
   1899
   1900static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
   1901				   struct htc_packet *packet)
   1902{
    1903	ath6kl_dbg(ATH6KL_DBG_HTC,
    1904		   "htc rx complete ep %d packet 0x%p\n",
    1905		   endpoint->eid, packet);
    1906
    1907	endpoint->ep_cb.rx(endpoint->target, packet);
   1908}
   1909
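        /*
         * Fetch several pending messages in a single HIF scatter request,
         * one HTC packet per scatter entry, each padded to the transport
         * block size by CALC_TXRX_PADDED_LEN(). A hedged worked example,
         * assuming a 128-byte block so that block_mask == 127:
         *
         *	(1540 + 127) & ~127 == 1664
         *
         * i.e. a 1540-byte frame consumes 1664 bytes of the bundle budget
         * (rem_space, bounded by target->max_rx_bndl_sz).
         */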
   1910static int ath6kl_htc_rx_bundle(struct htc_target *target,
   1911				struct list_head *rxq,
   1912				struct list_head *sync_compq,
   1913				int *n_pkt_fetched, bool part_bundle)
   1914{
   1915	struct hif_scatter_req *scat_req;
   1916	struct htc_packet *packet;
   1917	int rem_space = target->max_rx_bndl_sz;
   1918	int n_scat_pkt, status = 0, i, len;
   1919
   1920	n_scat_pkt = get_queue_depth(rxq);
   1921	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
   1922
   1923	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
    1924		/*
    1925		 * We were forced to split this bundle receive operation;
    1926		 * all packets in this partial bundle must have their
    1927		 * lookaheads ignored.
    1928		 */
   1929		part_bundle = true;
   1930
   1931		/*
   1932		 * This would only happen if the target ignored our max
   1933		 * bundle limit.
   1934		 */
    1935		ath6kl_warn("%s(): partial bundle detected num:%d, %d\n",
    1936			    __func__, get_queue_depth(rxq), n_scat_pkt);
   1937	}
   1938
   1939	len = 0;
   1940
   1941	ath6kl_dbg(ATH6KL_DBG_HTC,
   1942		   "htc rx bundle depth %d pkts %d\n",
   1943		   get_queue_depth(rxq), n_scat_pkt);
   1944
   1945	scat_req = hif_scatter_req_get(target->dev->ar);
   1946
   1947	if (scat_req == NULL)
   1948		goto fail_rx_pkt;
   1949
   1950	for (i = 0; i < n_scat_pkt; i++) {
   1951		int pad_len;
   1952
   1953		packet = list_first_entry(rxq, struct htc_packet, list);
   1954		list_del(&packet->list);
   1955
    1956		pad_len = CALC_TXRX_PADDED_LEN(target,
    1957					       packet->act_len);
   1958
   1959		if ((rem_space - pad_len) < 0) {
   1960			list_add(&packet->list, rxq);
   1961			break;
   1962		}
   1963
   1964		rem_space -= pad_len;
   1965
   1966		if (part_bundle || (i < (n_scat_pkt - 1)))
    1967			/*
    1968			 * Packets 0..n-1 cannot be checked for look-aheads
    1969			 * since we are fetching a bundle; the last packet,
    1970			 * however, can have its lookahead used.
    1971			 */
   1972			packet->info.rx.rx_flags |=
   1973			    HTC_RX_PKT_IGNORE_LOOKAHEAD;
   1974
   1975		/* NOTE: 1 HTC packet per scatter entry */
   1976		scat_req->scat_list[i].buf = packet->buf;
   1977		scat_req->scat_list[i].len = pad_len;
   1978
   1979		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
   1980
   1981		list_add_tail(&packet->list, sync_compq);
   1982
   1983		WARN_ON(!scat_req->scat_list[i].len);
   1984		len += scat_req->scat_list[i].len;
   1985	}
   1986
   1987	scat_req->len = len;
   1988	scat_req->scat_entries = i;
   1989
   1990	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
   1991
   1992	if (!status)
   1993		*n_pkt_fetched = i;
   1994
   1995	/* free scatter request */
   1996	hif_scatter_req_add(target->dev->ar, scat_req);
   1997
   1998fail_rx_pkt:
   1999
   2000	return status;
   2001}
   2002
   2003static int ath6kl_htc_rx_process_packets(struct htc_target *target,
   2004					 struct list_head *comp_pktq,
   2005					 u32 lk_ahds[],
   2006					 int *n_lk_ahd)
   2007{
   2008	struct htc_packet *packet, *tmp_pkt;
   2009	struct htc_endpoint *ep;
   2010	int status = 0;
   2011
   2012	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
   2013		ep = &target->endpoint[packet->endpoint];
   2014
   2015		trace_ath6kl_htc_rx(packet->status, packet->endpoint,
   2016				    packet->buf, packet->act_len);
   2017
   2018		/* process header for each of the recv packet */
   2019		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
   2020						   n_lk_ahd);
   2021		if (status)
   2022			return status;
   2023
   2024		list_del(&packet->list);
   2025
   2026		if (list_empty(comp_pktq)) {
   2027			/*
   2028			 * Last packet's more packet flag is set
   2029			 * based on the lookahead.
   2030			 */
   2031			if (*n_lk_ahd > 0)
   2032				ath6kl_htc_rx_set_indicate(lk_ahds[0],
   2033							   ep, packet);
   2034		} else
   2035			/*
   2036			 * Packets in a bundle automatically have
   2037			 * this flag set.
   2038			 */
   2039			packet->info.rx.indicat_flags |=
   2040				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
   2041
   2042		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
   2043
   2044		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
   2045			ep->ep_st.rx_bundl += 1;
   2046
   2047		ath6kl_htc_rx_complete(ep, packet);
   2048	}
   2049
   2050	return status;
   2051}
   2052
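        /*
         * Post-process the synchronous completion queue: run header
         * validation on every fetched packet, flag all but the last one
         * with HTC_RX_FLAGS_INDICATE_MORE_PKTS (the last packet's flag is
         * derived from the fresh lookahead instead) and hand each packet
         * to the endpoint's rx callback.
         */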
   2053static int ath6kl_htc_rx_fetch(struct htc_target *target,
   2054			       struct list_head *rx_pktq,
   2055			       struct list_head *comp_pktq)
   2056{
   2057	int fetched_pkts;
   2058	bool part_bundle = false;
   2059	int status = 0;
   2060	struct list_head tmp_rxq;
   2061	struct htc_packet *packet, *tmp_pkt;
   2062
   2063	/* now go fetch the list of HTC packets */
   2064	while (!list_empty(rx_pktq)) {
   2065		fetched_pkts = 0;
   2066
   2067		INIT_LIST_HEAD(&tmp_rxq);
   2068
   2069		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
   2070			/*
   2071			 * There are enough packets to attempt a
   2072			 * bundle transfer and recv bundling is
   2073			 * allowed.
   2074			 */
   2075			status = ath6kl_htc_rx_bundle(target, rx_pktq,
   2076						      &tmp_rxq,
   2077						      &fetched_pkts,
   2078						      part_bundle);
   2079			if (status)
   2080				goto fail_rx;
   2081
   2082			if (!list_empty(rx_pktq))
   2083				part_bundle = true;
   2084
   2085			list_splice_tail_init(&tmp_rxq, comp_pktq);
   2086		}
   2087
   2088		if (!fetched_pkts) {
   2089			packet = list_first_entry(rx_pktq, struct htc_packet,
   2090						   list);
   2091
   2092			/* fully synchronous */
   2093			packet->completion = NULL;
   2094
   2095			if (!list_is_singular(rx_pktq))
    2096				/*
    2097				 * look_aheads in all packets
    2098				 * except the last one in the
    2099				 * bundle must be ignored
    2100				 */
   2101				packet->info.rx.rx_flags |=
   2102					HTC_RX_PKT_IGNORE_LOOKAHEAD;
   2103
   2104			/* go fetch the packet */
   2105			status = ath6kl_htc_rx_packet(target, packet,
   2106						      packet->act_len);
   2107
   2108			list_move_tail(&packet->list, &tmp_rxq);
   2109
   2110			if (status)
   2111				goto fail_rx;
   2112
   2113			list_splice_tail_init(&tmp_rxq, comp_pktq);
   2114		}
   2115	}
   2116
   2117	return 0;
   2118
   2119fail_rx:
   2120
    2121	/*
    2122	 * Clean up any packets we allocated but never ended up
    2123	 * using to fetch data.
    2124	 */
   2125
   2126	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
   2127		list_del(&packet->list);
   2128		htc_reclaim_rxbuf(target, packet,
   2129				  &target->endpoint[packet->endpoint]);
   2130	}
   2131
   2132	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
   2133		list_del(&packet->list);
   2134		htc_reclaim_rxbuf(target, packet,
   2135				  &target->endpoint[packet->endpoint]);
   2136	}
   2137
   2138	return status;
   2139}
   2140
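        /*
         * Top-level synchronous RX loop, driven by lookaheads: each pass
         * allocates receive packets for the endpoint named in the current
         * lookahead, fetches them (bundled when possible) and processes
         * them. A trailer in the last packet may yield a fresh lookahead,
         * in which case the loop runs again and chk_irq_status_cnt is set
         * so the IRQ status registers are re-checked before leaving
         * interrupt processing.
         */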
   2141int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
   2142				     u32 msg_look_ahead, int *num_pkts)
   2143{
   2144	struct htc_packet *packets, *tmp_pkt;
   2145	struct htc_endpoint *endpoint;
   2146	struct list_head rx_pktq, comp_pktq;
   2147	int status = 0;
   2148	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
   2149	int num_look_ahead = 1;
   2150	enum htc_endpoint_id id;
   2151	int n_fetched = 0;
   2152
   2153	INIT_LIST_HEAD(&comp_pktq);
   2154	*num_pkts = 0;
   2155
   2156	/*
   2157	 * On first entry copy the look_aheads into our temp array for
   2158	 * processing
   2159	 */
   2160	look_aheads[0] = msg_look_ahead;
   2161
   2162	while (true) {
   2163		/*
   2164		 * First lookahead sets the expected endpoint IDs for all
   2165		 * packets in a bundle.
   2166		 */
    2167		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
    2168
    2169		if (id >= ENDPOINT_MAX) {
    2170			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
    2171				   id);
    2172			status = -ENOMEM;
    2173			break;
    2174		}
    2175		endpoint = &target->endpoint[id];
   2176
   2177		INIT_LIST_HEAD(&rx_pktq);
   2178		INIT_LIST_HEAD(&comp_pktq);
   2179
    2180		/*
    2181		 * Try to allocate as many HTC RX packets as indicated by
    2182		 * the look_aheads.
    2183		 */
   2184		status = ath6kl_htc_rx_alloc(target, look_aheads,
   2185					     num_look_ahead, endpoint,
   2186					     &rx_pktq);
   2187		if (status)
   2188			break;
   2189
   2190		if (get_queue_depth(&rx_pktq) >= 2)
    2191			/*
    2192			 * A recv bundle was detected; force an IRQ status
    2193			 * re-check.
    2194			 */
   2195			target->chk_irq_status_cnt = 1;
   2196
   2197		n_fetched += get_queue_depth(&rx_pktq);
   2198
   2199		num_look_ahead = 0;
   2200
   2201		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
   2202
   2203		if (!status)
   2204			ath6kl_htc_rx_chk_water_mark(endpoint);
   2205
   2206		/* Process fetched packets */
   2207		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
   2208						       look_aheads,
   2209						       &num_look_ahead);
   2210
   2211		if (!num_look_ahead || status)
   2212			break;
   2213
   2214		/*
   2215		 * For SYNCH processing, if we get here, we are running
   2216		 * through the loop again due to a detected lookahead. Set
   2217		 * flag that we should re-check IRQ status registers again
   2218		 * before leaving IRQ processing, this can net better
   2219		 * performance in high throughput situations.
   2220		 */
   2221		target->chk_irq_status_cnt = 1;
   2222	}
   2223
   2224	if (status) {
   2225		if (status != -ECANCELED)
   2226			ath6kl_err("failed to get pending recv messages: %d\n",
   2227				   status);
   2228
   2229		/* cleanup any packets in sync completion queue */
   2230		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
   2231			list_del(&packets->list);
   2232			htc_reclaim_rxbuf(target, packets,
   2233					  &target->endpoint[packets->endpoint]);
   2234		}
   2235
   2236		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   2237			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
   2238			ath6kl_hif_rx_control(target->dev, false);
   2239		}
   2240	}
   2241
   2242	/*
   2243	 * Before leaving, check to see if host ran out of buffers and
   2244	 * needs to stop the receiver.
   2245	 */
   2246	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
   2247		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
   2248		ath6kl_hif_rx_control(target->dev, false);
   2249	}
   2250	*num_pkts = n_fetched;
   2251
   2252	return status;
   2253}
   2254
    2255/*
    2256 * Synchronously wait for a control message from the target.
    2257 * This function is used at initialization time ONLY; at init,
    2258 * messages on ENDPOINT 0 are expected.
    2259 */
   2260static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
   2261{
   2262	struct htc_packet *packet = NULL;
   2263	struct htc_frame_look_ahead look_ahead;
   2264
   2265	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word,
   2266				       HTC_TARGET_RESPONSE_TIMEOUT))
   2267		return NULL;
   2268
   2269	ath6kl_dbg(ATH6KL_DBG_HTC,
   2270		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word);
   2271
   2272	if (look_ahead.eid != ENDPOINT_0)
   2273		return NULL;
   2274
   2275	packet = htc_get_control_buf(target, false);
   2276
   2277	if (!packet)
   2278		return NULL;
   2279
   2280	packet->info.rx.rx_flags = 0;
   2281	packet->info.rx.exp_hdr = look_ahead.word;
   2282	packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH;
   2283
   2284	if (packet->act_len > packet->buf_len)
   2285		goto fail_ctrl_rx;
   2286
   2287	/* we want synchronous operation */
   2288	packet->completion = NULL;
   2289
   2290	/* get the message from the device, this will block */
   2291	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
   2292		goto fail_ctrl_rx;
   2293
   2294	trace_ath6kl_htc_rx(packet->status, packet->endpoint,
   2295			    packet->buf, packet->act_len);
   2296
   2297	/* process receive header */
   2298	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
   2299
   2300	if (packet->status) {
   2301		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
   2302			   packet->status);
   2303		goto fail_ctrl_rx;
   2304	}
   2305
   2306	return packet;
   2307
   2308fail_ctrl_rx:
   2309	if (packet != NULL) {
   2310		htc_rxpkt_reset(packet);
   2311		reclaim_rx_ctrl_buf(target, packet);
   2312	}
   2313
   2314	return NULL;
   2315}
   2316
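        /*
         * Queue caller-supplied receive buffers onto an endpoint's rx_bufq.
         * If the receiver had been throttled while waiting for buffers on
         * exactly this endpoint (HTC_RECV_WAIT_BUFFERS), it is re-enabled
         * via ath6kl_hif_rx_control().
         */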
    2317static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
    2318					      struct list_head *pkt_queue)
   2319{
   2320	struct htc_endpoint *endpoint;
   2321	struct htc_packet *first_pkt;
   2322	bool rx_unblock = false;
   2323	int status = 0, depth;
   2324
   2325	if (list_empty(pkt_queue))
   2326		return -ENOMEM;
   2327
   2328	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
   2329
   2330	if (first_pkt->endpoint >= ENDPOINT_MAX)
   2331		return status;
   2332
   2333	depth = get_queue_depth(pkt_queue);
   2334
    2335	ath6kl_dbg(ATH6KL_DBG_HTC,
    2336		   "htc rx add multiple ep id %d cnt %d len %d\n",
    2337		   first_pkt->endpoint, depth, first_pkt->buf_len);
   2338
   2339	endpoint = &target->endpoint[first_pkt->endpoint];
   2340
   2341	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
   2342		struct htc_packet *packet, *tmp_pkt;
   2343
   2344		/* walk through queue and mark each one canceled */
   2345		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
   2346			packet->status = -ECANCELED;
   2347			list_del(&packet->list);
   2348			ath6kl_htc_rx_complete(endpoint, packet);
   2349		}
   2350
   2351		return status;
   2352	}
   2353
   2354	spin_lock_bh(&target->rx_lock);
   2355
   2356	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
   2357
   2358	/* check if we are blocked waiting for a new buffer */
   2359	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
   2360		if (target->ep_waiting == first_pkt->endpoint) {
   2361			ath6kl_dbg(ATH6KL_DBG_HTC,
   2362				   "htc rx blocked on ep %d, unblocking\n",
   2363				   target->ep_waiting);
   2364			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
   2365			target->ep_waiting = ENDPOINT_MAX;
   2366			rx_unblock = true;
   2367		}
   2368	}
   2369
   2370	spin_unlock_bh(&target->rx_lock);
   2371
   2372	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
   2373		/* TODO : implement a buffer threshold count? */
   2374		ath6kl_hif_rx_control(target->dev, true);
   2375
   2376	return status;
   2377}
   2378
   2379static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
   2380{
   2381	struct htc_endpoint *endpoint;
   2382	struct htc_packet *packet, *tmp_pkt;
   2383	int i;
   2384
   2385	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
   2386		endpoint = &target->endpoint[i];
   2387		if (!endpoint->svc_id)
   2388			/* not in use.. */
   2389			continue;
   2390
   2391		spin_lock_bh(&target->rx_lock);
   2392		list_for_each_entry_safe(packet, tmp_pkt,
   2393					 &endpoint->rx_bufq, list) {
   2394			list_del(&packet->list);
   2395			spin_unlock_bh(&target->rx_lock);
   2396			ath6kl_dbg(ATH6KL_DBG_HTC,
   2397				   "htc rx flush pkt 0x%p  len %d  ep %d\n",
   2398				   packet, packet->buf_len,
   2399				   packet->endpoint);
    2400			/*
    2401			 * Packets in the rx_bufq of endpoint 0 were
    2402			 * originally queued from target->free_ctrl_rxbuf,
    2403			 * where the packet and packet->buf_start are
    2404			 * allocated separately with kmalloc(). For the other
    2405			 * endpoints' rx_bufq the buffer is allocated as an
    2406			 * skb, with the packet embedded in skb->head. Take
    2407			 * care of this difference when freeing the memory.
    2408			 */
   2409			if (packet->endpoint == ENDPOINT_0) {
   2410				kfree(packet->buf_start);
   2411				kfree(packet);
   2412			} else {
   2413				dev_kfree_skb(packet->pkt_cntxt);
   2414			}
   2415			spin_lock_bh(&target->rx_lock);
   2416		}
   2417		spin_unlock_bh(&target->rx_lock);
   2418	}
   2419}
   2420
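        /*
         * Connect-service handshake, carried over ENDPOINT_0. A rough
         * sketch of the exchange performed below:
         *
         *	host -> target:  htc_conn_service_msg  { msg_id, svc_id,
         *						  conn_flags, ... }
         *	target -> host:  htc_conn_service_resp { status, eid,
         *						  max_msg_sz, ... }
         *
         * On success the returned eid selects the endpoint that is then
         * initialized for the service; HTC_CTRL_RSVD_SVC short-circuits
         * to ENDPOINT_0 without any message exchange.
         */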
    2421static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
    2422					struct htc_service_connect_req *conn_req,
    2423					struct htc_service_connect_resp *conn_resp)
   2424{
   2425	struct htc_packet *rx_pkt = NULL;
   2426	struct htc_packet *tx_pkt = NULL;
   2427	struct htc_conn_service_resp *resp_msg;
   2428	struct htc_conn_service_msg *conn_msg;
   2429	struct htc_endpoint *endpoint;
   2430	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
   2431	unsigned int max_msg_sz = 0;
   2432	int status = 0;
   2433	u16 msg_id;
   2434
   2435	ath6kl_dbg(ATH6KL_DBG_HTC,
   2436		   "htc connect service target 0x%p service id 0x%x\n",
   2437		   target, conn_req->svc_id);
   2438
   2439	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
   2440		/* special case for pseudo control service */
   2441		assigned_ep = ENDPOINT_0;
   2442		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
   2443	} else {
   2444		/* allocate a packet to send to the target */
   2445		tx_pkt = htc_get_control_buf(target, true);
   2446
   2447		if (!tx_pkt)
   2448			return -ENOMEM;
   2449
   2450		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
   2451		memset(conn_msg, 0, sizeof(*conn_msg));
   2452		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
   2453		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
   2454		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
   2455
   2456		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
   2457				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
   2458				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
   2459
   2460		/* we want synchronous operation */
   2461		tx_pkt->completion = NULL;
   2462		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
   2463		status = ath6kl_htc_tx_issue(target, tx_pkt);
   2464
   2465		if (status)
   2466			goto fail_tx;
   2467
   2468		/* wait for response */
   2469		rx_pkt = htc_wait_for_ctrl_msg(target);
   2470
   2471		if (!rx_pkt) {
   2472			status = -ENOMEM;
   2473			goto fail_tx;
   2474		}
   2475
   2476		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
   2477		msg_id = le16_to_cpu(resp_msg->msg_id);
   2478
   2479		if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
   2480		    (rx_pkt->act_len < sizeof(*resp_msg))) {
   2481			status = -ENOMEM;
   2482			goto fail_tx;
   2483		}
   2484
   2485		conn_resp->resp_code = resp_msg->status;
   2486		/* check response status */
   2487		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
   2488			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
   2489				   resp_msg->svc_id, resp_msg->status);
   2490			status = -ENOMEM;
   2491			goto fail_tx;
   2492		}
   2493
   2494		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
   2495		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
   2496	}
   2497
   2498	if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
   2499			 assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
   2500		status = -ENOMEM;
   2501		goto fail_tx;
   2502	}
   2503
   2504	endpoint = &target->endpoint[assigned_ep];
   2505	endpoint->eid = assigned_ep;
   2506	if (endpoint->svc_id) {
   2507		status = -ENOMEM;
   2508		goto fail_tx;
   2509	}
   2510
   2511	/* return assigned endpoint to caller */
   2512	conn_resp->endpoint = assigned_ep;
   2513	conn_resp->len_max = max_msg_sz;
   2514
   2515	/* setup the endpoint */
   2516
   2517	/* this marks the endpoint in use */
   2518	endpoint->svc_id = conn_req->svc_id;
   2519
   2520	endpoint->max_txq_depth = conn_req->max_txq_depth;
   2521	endpoint->len_max = max_msg_sz;
   2522	endpoint->ep_cb = conn_req->ep_cb;
   2523	endpoint->cred_dist.svc_id = conn_req->svc_id;
   2524	endpoint->cred_dist.htc_ep = endpoint;
   2525	endpoint->cred_dist.endpoint = assigned_ep;
   2526	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
   2527
   2528	switch (endpoint->svc_id) {
   2529	case WMI_DATA_BK_SVC:
   2530		endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
   2531		break;
   2532	default:
   2533		endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
   2534		break;
   2535	}
   2536
   2537	if (conn_req->max_rxmsg_sz) {
    2538		/*
    2539		 * Override the cred_per_msg calculation; this optimizes
    2540		 * the credit-low indications since the host will actually
    2541		 * issue smaller messages on the send path.
    2542		 */
   2543		if (conn_req->max_rxmsg_sz > max_msg_sz) {
   2544			status = -ENOMEM;
   2545			goto fail_tx;
   2546		}
   2547		endpoint->cred_dist.cred_per_msg =
   2548		    conn_req->max_rxmsg_sz / target->tgt_cred_sz;
   2549	} else
   2550		endpoint->cred_dist.cred_per_msg =
   2551		    max_msg_sz / target->tgt_cred_sz;
   2552
   2553	if (!endpoint->cred_dist.cred_per_msg)
   2554		endpoint->cred_dist.cred_per_msg = 1;
   2555
   2556	/* save local connection flags */
   2557	endpoint->conn_flags = conn_req->flags;
   2558
   2559fail_tx:
   2560	if (tx_pkt)
   2561		htc_reclaim_txctrl_buf(target, tx_pkt);
   2562
   2563	if (rx_pkt) {
   2564		htc_rxpkt_reset(rx_pkt);
   2565		reclaim_rx_ctrl_buf(target, rx_pkt);
   2566	}
   2567
   2568	return status;
   2569}
   2570
   2571static void reset_ep_state(struct htc_target *target)
   2572{
   2573	struct htc_endpoint *endpoint;
   2574	int i;
   2575
   2576	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
   2577		endpoint = &target->endpoint[i];
   2578		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
   2579		endpoint->svc_id = 0;
   2580		endpoint->len_max = 0;
   2581		endpoint->max_txq_depth = 0;
   2582		memset(&endpoint->ep_st, 0,
   2583		       sizeof(endpoint->ep_st));
   2584		INIT_LIST_HEAD(&endpoint->rx_bufq);
   2585		INIT_LIST_HEAD(&endpoint->txq);
   2586		endpoint->target = target;
   2587	}
   2588
   2589	/* reset distribution list */
   2590	/* FIXME: free existing entries */
   2591	INIT_LIST_HEAD(&target->cred_dist_list);
   2592}
   2593
    2594static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
    2595					 enum htc_endpoint_id endpoint)
   2596{
   2597	int num;
   2598
   2599	spin_lock_bh(&target->rx_lock);
   2600	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
   2601	spin_unlock_bh(&target->rx_lock);
   2602	return num;
   2603}
   2604
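        /*
         * Size the TX/RX bundling limits from what HTC, the HIF scatter
         * engine and the extended mailbox window can each handle; the
         * smallest limit wins. TX bundling is additionally vetoed when the
         * target credit size is not a multiple of the I/O block size (see
         * the warning below).
         */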
   2605static void htc_setup_msg_bndl(struct htc_target *target)
   2606{
   2607	/* limit what HTC can handle */
   2608	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
   2609				       target->msg_per_bndl_max);
   2610
   2611	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
   2612		target->msg_per_bndl_max = 0;
   2613		return;
   2614	}
   2615
    2616	/* limit the bundle to what the device layer can handle */
   2617	target->msg_per_bndl_max = min(target->max_scat_entries,
   2618				       target->msg_per_bndl_max);
   2619
   2620	ath6kl_dbg(ATH6KL_DBG_BOOT,
   2621		   "htc bundling allowed msg_per_bndl_max %d\n",
   2622		   target->msg_per_bndl_max);
   2623
   2624	/* Max rx bundle size is limited by the max tx bundle size */
   2625	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
    2626	/* Max tx bundle size is limited by the extended mbox address range */
   2627	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
   2628				     target->max_xfer_szper_scatreq);
   2629
   2630	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
   2631		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);
   2632
   2633	if (target->max_tx_bndl_sz)
   2634		/* tx_bndl_mask is enabled per AC, each has 1 bit */
   2635		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
   2636
   2637	if (target->max_rx_bndl_sz)
   2638		target->rx_bndl_enable = true;
   2639
   2640	if ((target->tgt_cred_sz % target->block_sz) != 0) {
   2641		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
   2642			    target->tgt_cred_sz);
   2643
    2644		/*
    2645		 * Disallow send bundling: since the credit size is
    2646		 * not aligned to a block size, the I/O block
    2647		 * padding will spill into the next credit buffer,
    2648		 * which is fatal.
    2649		 */
   2650		target->tx_bndl_mask = 0;
   2651	}
   2652}
   2653
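        /*
         * First contact with the target: block until the HTC ready (or
         * ready-extended) message arrives on ENDPOINT_0, record the credit
         * count, credit size and bundling capability it advertises, then
         * connect the pseudo control service.
         */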
   2654static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
   2655{
   2656	struct htc_packet *packet = NULL;
   2657	struct htc_ready_ext_msg *rdy_msg;
   2658	struct htc_service_connect_req connect;
   2659	struct htc_service_connect_resp resp;
   2660	int status;
   2661
   2662	/* we should be getting 1 control message that the target is ready */
   2663	packet = htc_wait_for_ctrl_msg(target);
   2664
   2665	if (!packet)
   2666		return -ENOMEM;
   2667
   2668	/* we controlled the buffer creation so it's properly aligned */
   2669	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
   2670
   2671	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
   2672	    (packet->act_len < sizeof(struct htc_ready_msg))) {
   2673		status = -ENOMEM;
   2674		goto fail_wait_target;
   2675	}
   2676
   2677	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
   2678		status = -ENOMEM;
   2679		goto fail_wait_target;
   2680	}
   2681
   2682	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
   2683	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
   2684
   2685	ath6kl_dbg(ATH6KL_DBG_BOOT,
   2686		   "htc target ready credits %d size %d\n",
   2687		   target->tgt_creds, target->tgt_cred_sz);
   2688
   2689	/* check if this is an extended ready message */
   2690	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
   2691		/* this is an extended message */
   2692		target->htc_tgt_ver = rdy_msg->htc_ver;
   2693		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
   2694	} else {
   2695		/* legacy */
   2696		target->htc_tgt_ver = HTC_VERSION_2P0;
   2697		target->msg_per_bndl_max = 0;
   2698	}
   2699
   2700	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
   2701		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
   2702		   target->htc_tgt_ver);
   2703
   2704	if (target->msg_per_bndl_max > 0)
   2705		htc_setup_msg_bndl(target);
   2706
   2707	/* setup our pseudo HTC control endpoint connection */
   2708	memset(&connect, 0, sizeof(connect));
   2709	memset(&resp, 0, sizeof(resp));
   2710	connect.ep_cb.rx = htc_ctrl_rx;
   2711	connect.ep_cb.rx_refill = NULL;
   2712	connect.ep_cb.tx_full = NULL;
   2713	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
   2714	connect.svc_id = HTC_CTRL_RSVD_SVC;
   2715
   2716	/* connect fake service */
   2717	status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
   2718
   2719	if (status)
    2720		/*
    2721		 * FIXME: this call doesn't make sense; the caller should
    2722		 * call ath6kl_htc_mbox_cleanup() when it wants to remove HTC
    2723		 */
   2724		ath6kl_hif_cleanup_scatter(target->dev->ar);
   2725
   2726fail_wait_target:
   2727	if (packet) {
   2728		htc_rxpkt_reset(packet);
   2729		reclaim_rx_ctrl_buf(target, packet);
   2730	}
   2731
   2732	return status;
   2733}
   2734
   2735/*
   2736 * Start HTC, enable interrupts and let the target know
   2737 * host has finished setup.
   2738 */
   2739static int ath6kl_htc_mbox_start(struct htc_target *target)
   2740{
   2741	struct htc_packet *packet;
   2742	int status;
   2743
   2744	memset(&target->dev->irq_proc_reg, 0,
   2745	       sizeof(target->dev->irq_proc_reg));
   2746
   2747	/* Disable interrupts at the chip level */
   2748	ath6kl_hif_disable_intrs(target->dev);
   2749
   2750	target->htc_flags = 0;
   2751	target->rx_st_flags = 0;
   2752
   2753	/* Push control receive buffers into htc control endpoint */
   2754	while ((packet = htc_get_control_buf(target, false)) != NULL) {
   2755		status = htc_add_rxbuf(target, packet);
   2756		if (status)
   2757			return status;
   2758	}
   2759
   2760	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
   2761	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
   2762			   target->tgt_creds);
   2763
   2764	dump_cred_dist_stats(target);
   2765
    2766	/* Indicate setup completion to the target */
   2767	status = htc_setup_tx_complete(target);
   2768
   2769	if (status)
   2770		return status;
   2771
   2772	/* unmask interrupts */
   2773	status = ath6kl_hif_unmask_intrs(target->dev);
   2774
   2775	if (status)
   2776		ath6kl_htc_mbox_stop(target);
   2777
   2778	return status;
   2779}
   2780
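        /*
         * (Re)build the control buffer pools: each of NUM_CONTROL_BUFFERS
         * packets gets a kzalloc'd buffer sized for the larger of one
         * transport block or HTC_MAX_CTRL_MSG_LEN, plus the HTC header.
         * The first NUM_CONTROL_RX_BUFFERS packets go to the rx pool, the
         * remainder to the tx pool.
         */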
   2781static int ath6kl_htc_reset(struct htc_target *target)
   2782{
   2783	u32 block_size, ctrl_bufsz;
   2784	struct htc_packet *packet;
   2785	int i;
   2786
   2787	reset_ep_state(target);
   2788
   2789	block_size = target->dev->ar->mbox_info.block_size;
   2790
   2791	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
   2792		      (block_size + HTC_HDR_LENGTH) :
   2793		      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
   2794
   2795	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
   2796		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
   2797		if (!packet)
   2798			return -ENOMEM;
   2799
   2800		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
   2801		if (!packet->buf_start) {
   2802			kfree(packet);
   2803			return -ENOMEM;
   2804		}
   2805
   2806		packet->buf_len = ctrl_bufsz;
   2807		if (i < NUM_CONTROL_RX_BUFFERS) {
   2808			packet->act_len = 0;
   2809			packet->buf = packet->buf_start;
   2810			packet->endpoint = ENDPOINT_0;
   2811			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
   2812		} else {
   2813			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
   2814		}
   2815	}
   2816
   2817	return 0;
   2818}
   2819
   2820/* htc_stop: stop interrupt reception, and flush all queued buffers */
   2821static void ath6kl_htc_mbox_stop(struct htc_target *target)
   2822{
   2823	spin_lock_bh(&target->htc_lock);
   2824	target->htc_flags |= HTC_OP_STATE_STOPPING;
   2825	spin_unlock_bh(&target->htc_lock);
   2826
   2827	/*
   2828	 * Masking interrupts is a synchronous operation, when this
   2829	 * function returns all pending HIF I/O has completed, we can
   2830	 * safely flush the queues.
   2831	 */
   2832	ath6kl_hif_mask_intrs(target->dev);
   2833
   2834	ath6kl_htc_flush_txep_all(target);
   2835
   2836	ath6kl_htc_mbox_flush_rx_buf(target);
   2837
   2838	ath6kl_htc_reset(target);
   2839}
   2840
   2841static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
   2842{
   2843	struct htc_target *target = NULL;
   2844	int status = 0;
   2845
   2846	target = kzalloc(sizeof(*target), GFP_KERNEL);
   2847	if (!target) {
   2848		ath6kl_err("unable to allocate memory\n");
   2849		return NULL;
   2850	}
   2851
   2852	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
   2853	if (!target->dev) {
   2854		ath6kl_err("unable to allocate memory\n");
   2855		kfree(target);
   2856		return NULL;
   2857	}
   2858
   2859	spin_lock_init(&target->htc_lock);
   2860	spin_lock_init(&target->rx_lock);
   2861	spin_lock_init(&target->tx_lock);
   2862
   2863	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
   2864	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
   2865	INIT_LIST_HEAD(&target->cred_dist_list);
   2866
   2867	target->dev->ar = ar;
   2868	target->dev->htc_cnxt = target;
   2869	target->ep_waiting = ENDPOINT_MAX;
   2870
   2871	status = ath6kl_hif_setup(target->dev);
   2872	if (status)
   2873		goto err_htc_cleanup;
   2874
   2875	status = ath6kl_htc_reset(target);
   2876	if (status)
   2877		goto err_htc_cleanup;
   2878
   2879	return target;
   2880
   2881err_htc_cleanup:
   2882	ath6kl_htc_mbox_cleanup(target);
   2883
   2884	return NULL;
   2885}
   2886
   2887/* cleanup the HTC instance */
   2888static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
   2889{
   2890	struct htc_packet *packet, *tmp_packet;
   2891
   2892	ath6kl_hif_cleanup_scatter(target->dev->ar);
   2893
   2894	list_for_each_entry_safe(packet, tmp_packet,
   2895				 &target->free_ctrl_txbuf, list) {
   2896		list_del(&packet->list);
   2897		kfree(packet->buf_start);
   2898		kfree(packet);
   2899	}
   2900
   2901	list_for_each_entry_safe(packet, tmp_packet,
   2902				 &target->free_ctrl_rxbuf, list) {
   2903		list_del(&packet->list);
   2904		kfree(packet->buf_start);
   2905		kfree(packet);
   2906	}
   2907
   2908	kfree(target->dev);
   2909	kfree(target);
   2910}
   2911
   2912static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
   2913	.create = ath6kl_htc_mbox_create,
   2914	.wait_target = ath6kl_htc_mbox_wait_target,
   2915	.start = ath6kl_htc_mbox_start,
   2916	.conn_service = ath6kl_htc_mbox_conn_service,
   2917	.tx = ath6kl_htc_mbox_tx,
   2918	.stop = ath6kl_htc_mbox_stop,
   2919	.cleanup = ath6kl_htc_mbox_cleanup,
   2920	.flush_txep = ath6kl_htc_mbox_flush_txep,
   2921	.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
   2922	.activity_changed = ath6kl_htc_mbox_activity_changed,
   2923	.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
   2924	.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
   2925	.credit_setup = ath6kl_htc_mbox_credit_setup,
   2926};
   2927
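        /*
         * A hedged usage sketch (the caller pattern is assumed from the ops
         * table above, not shown in this file): the core selects this mbox
         * implementation at probe time and then dispatches through
         * ar->htc_ops, roughly:
         *
         *	ath6kl_htc_mbox_attach(ar);
         *	ar->htc_target = ar->htc_ops->create(ar);
         *	if (!ar->htc_ops->wait_target(ar->htc_target))
         *		ar->htc_ops->start(ar->htc_target);
         */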
   2928void ath6kl_htc_mbox_attach(struct ath6kl *ar)
   2929{
   2930	ar->htc_ops = &ath6kl_htc_mbox_ops;
   2931}