cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

txrx.c (39464B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "cw1200.h"
#include "wsm.h"
#include "bh.h"
#include "sta.h"
#include "debug.h"

#define CW1200_INVALID_RATE_ID (0xFF)

static int cw1200_handle_action_rx(struct cw1200_common *priv,
				   struct sk_buff *skb);
static const struct ieee80211_rate *
cw1200_get_tx_rate(const struct cw1200_common *priv,
		   const struct ieee80211_tx_rate *rate);

/* ******************************************************************** */
/* TX queue lock / unlock						*/

static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
{
	int i;
	for (i = 0; i < 4; ++i)
		cw1200_queue_lock(&priv->tx_queue[i]);
}

static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
{
	int i;
	for (i = 0; i < 4; ++i)
		cw1200_queue_unlock(&priv->tx_queue[i]);
}
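
/* Note: the four TX queues above correspond to the four WMM access
 * categories exposed to mac80211; cw1200_tx() picks the queue for each
 * frame via skb_get_queue_mapping().
 */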

/* ******************************************************************** */
/* TX policy cache implementation					*/

static void tx_policy_dump(struct tx_policy *policy)
{
	pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
		 policy->raw[0] & 0x0F,  policy->raw[0] >> 4,
		 policy->raw[1] & 0x0F,  policy->raw[1] >> 4,
		 policy->raw[2] & 0x0F,  policy->raw[2] >> 4,
		 policy->raw[3] & 0x0F,  policy->raw[3] >> 4,
		 policy->raw[4] & 0x0F,  policy->raw[4] >> 4,
		 policy->raw[5] & 0x0F,  policy->raw[5] >> 4,
		 policy->raw[6] & 0x0F,  policy->raw[6] >> 4,
		 policy->raw[7] & 0x0F,  policy->raw[7] >> 4,
		 policy->raw[8] & 0x0F,  policy->raw[8] >> 4,
		 policy->raw[9] & 0x0F,  policy->raw[9] >> 4,
		 policy->raw[10] & 0x0F,  policy->raw[10] >> 4,
		 policy->raw[11] & 0x0F,  policy->raw[11] >> 4,
		 policy->defined);
}

static void tx_policy_build(const struct cw1200_common *priv,
	/* [out] */ struct tx_policy *policy,
	struct ieee80211_tx_rate *rates, size_t count)
{
	int i, j;
	unsigned limit = priv->short_frame_max_tx_count;
	unsigned total = 0;
	BUG_ON(rates[0].idx < 0);
	memset(policy, 0, sizeof(*policy));

	/* Sort rates in descending order. */
	for (i = 1; i < count; ++i) {
		if (rates[i].idx < 0) {
			count = i;
			break;
		}
		if (rates[i].idx > rates[i - 1].idx) {
			struct ieee80211_tx_rate tmp = rates[i - 1];
			rates[i - 1] = rates[i];
			rates[i] = tmp;
		}
	}

	/* Eliminate duplicates. */
	total = rates[0].count;
	for (i = 0, j = 1; j < count; ++j) {
		if (rates[j].idx == rates[i].idx) {
			rates[i].count += rates[j].count;
		} else if (rates[j].idx > rates[i].idx) {
			break;
		} else {
			++i;
			if (i != j)
				rates[i] = rates[j];
		}
		total += rates[j].count;
	}
	count = i + 1;

	/* Re-fill the policy, trying to keep every requested rate while
	 * respecting the global max TX retransmission count.
	 */
	if (limit < count)
		limit = count;
	if (total > limit) {
		for (i = 0; i < count; ++i) {
			int left = count - i - 1;
			if (rates[i].count > limit - left)
				rates[i].count = limit - left;
			limit -= rates[i].count;
		}
	}

	/* HACK!!! The device has problems (at least) switching from
	 * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
	 * of time (100-200 ms), causing a noticeable throughput drop.
	 * As a workaround, additional g-rates are injected into the
	 * policy.
	 */
	if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
	    rates[0].idx > 4 && rates[0].count > 2 &&
	    rates[1].idx < 2) {
		int mid_rate = (rates[0].idx + 4) >> 1;

		/* Decrease number of retries for the initial rate */
		rates[0].count -= 2;

		if (mid_rate != 4) {
			/* Keep fallback rate at 1Mbps. */
			rates[3] = rates[1];

			/* Inject 1 transmission on lowest g-rate */
			rates[2].idx = 4;
			rates[2].count = 1;
			rates[2].flags = rates[1].flags;

			/* Inject 1 transmission on mid-rate */
			rates[1].idx = mid_rate;
			rates[1].count = 1;

			/* Fallback to 1 Mbps is a really bad thing,
			 * so let's try to increase probability of
			 * successful transmission on the lowest g rate
			 * even more
			 */
			if (rates[0].count >= 3) {
				--rates[0].count;
				++rates[2].count;
			}

			/* Adjust amount of rates defined */
			count += 2;
		} else {
			/* Keep fallback rate at 1Mbps. */
			rates[2] = rates[1];

			/* Inject 2 transmissions on lowest g-rate */
			rates[1].idx = 4;
			rates[1].count = 2;

			/* Adjust amount of rates defined */
			count += 1;
		}
	}

	policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;

	for (i = 0; i < count; ++i) {
		register unsigned rateid, off, shift, retries;

		rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
		off = rateid >> 3;		/* eq. rateid / 8 */
		shift = (rateid & 0x07) << 2;	/* eq. (rateid % 8) * 4 */

		retries = rates[i].count;
		if (retries > 0x0F) {
			rates[i].count = 0x0f;
			retries = 0x0F;
		}
		policy->tbl[off] |= __cpu_to_le32(retries << shift);
		policy->retry_count += retries;
	}

	pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n",
		 count,
		 rates[0].idx, rates[0].count,
		 rates[1].idx, rates[1].count,
		 rates[2].idx, rates[2].count,
		 rates[3].idx, rates[3].count);
}
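
/* Encoding note: policy->defined is the highest used hardware rate id plus
 * one, i.e. the number of valid 4-bit retry slots in the table. For
 * illustration (hypothetical values): a rate with hw_value 10 and 3 retries
 * lands in tbl[1] (10 >> 3) at nibble 2 (10 & 7), so the loop above does
 * tbl[1] |= cpu_to_le32(3 << 8); each 32-bit word packs eight retry counts.
 */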

static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
					const struct tx_policy *cached)
{
	size_t count = wanted->defined >> 1;
	if (wanted->defined > cached->defined)
		return false;
	if (count) {
		if (memcmp(wanted->raw, cached->raw, count))
			return false;
	}
	if (wanted->defined & 1) {
		if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
			return false;
	}
	return true;
}

static int tx_policy_find(struct tx_policy_cache *cache,
				const struct tx_policy *wanted)
{
	/* O(n) complexity. Not so good, but there are only 8 entries in
	 * the cache.
	 * The LRU ordering also helps to reduce search time.
	 */
	struct tx_policy_cache_entry *it;
	/* First search for the policy in the "used" list */
	list_for_each_entry(it, &cache->used, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	/* Then in the "free" list */
	list_for_each_entry(it, &cache->free, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	return -1;
}

static inline void tx_policy_use(struct tx_policy_cache *cache,
				 struct tx_policy_cache_entry *entry)
{
	++entry->policy.usage_count;
	list_move(&entry->link, &cache->used);
}

static inline int tx_policy_release(struct tx_policy_cache *cache,
				    struct tx_policy_cache_entry *entry)
{
	int ret = --entry->policy.usage_count;
	if (!ret)
		list_move(&entry->link, &cache->free);
	return ret;
}

void tx_policy_clean(struct cw1200_common *priv)
{
	int idx, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy_cache_entry *entry;

	cw1200_tx_queues_lock(priv);
	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);

	for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
		entry = &cache->cache[idx];
		/* Policy usage count should be 0 at this time as all queues
		 * should be empty.
		 */
		if (WARN_ON(entry->policy.usage_count)) {
			entry->policy.usage_count = 0;
			list_move(&entry->link, &cache->free);
		}
		memset(&entry->policy, 0, sizeof(entry->policy));
	}
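	/* The first unlock below balances the extra queue lock that
	 * tx_policy_get() takes when the free list runs empty; the second
	 * pairs with the lock taken at the top of this function.
	 */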
	if (locked)
		cw1200_tx_queues_unlock(priv);

	cw1200_tx_queues_unlock(priv);
	spin_unlock_bh(&cache->lock);
}

/* ******************************************************************** */
/* External TX policy cache API						*/

void tx_policy_init(struct cw1200_common *priv)
{
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	int i;

	memset(cache, 0, sizeof(*cache));

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->used);
	INIT_LIST_HEAD(&cache->free);

	for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
		list_add(&cache->cache[i].link, &cache->free);
}

static int tx_policy_get(struct cw1200_common *priv,
		  struct ieee80211_tx_rate *rates,
		  size_t count, bool *renew)
{
	int idx;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy wanted;

	tx_policy_build(priv, &wanted, rates, count);

	spin_lock_bh(&cache->lock);
	if (WARN_ON_ONCE(list_empty(&cache->free))) {
		spin_unlock_bh(&cache->lock);
		return CW1200_INVALID_RATE_ID;
	}
	idx = tx_policy_find(cache, &wanted);
	if (idx >= 0) {
		pr_debug("[TX policy] Used TX policy: %d\n", idx);
		*renew = false;
	} else {
		struct tx_policy_cache_entry *entry;
		*renew = true;
		/* If the policy is not found, create a new one
		 * using the oldest entry in the "free" list
		 */
		entry = list_entry(cache->free.prev,
			struct tx_policy_cache_entry, link);
		entry->policy = wanted;
		idx = entry - cache->cache;
		pr_debug("[TX policy] New TX policy: %d\n", idx);
		tx_policy_dump(&entry->policy);
	}
	tx_policy_use(cache, &cache->cache[idx]);
	if (list_empty(&cache->free)) {
		/* Lock TX queues. */
		cw1200_tx_queues_lock(priv);
	}
	spin_unlock_bh(&cache->lock);
	return idx;
}

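/* tx_policy_get() pairs with tx_policy_put() (reached via cw1200_skb_dtor()):
 * while the free list stays empty the TX queues remain locked, throttling new
 * submissions until some policy is released.
 */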
static void tx_policy_put(struct cw1200_common *priv, int idx)
{
	int usage, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;

	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);
	usage = tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage) {
		/* Unlock TX queues. */
		cw1200_tx_queues_unlock(priv);
	}
	spin_unlock_bh(&cache->lock);
}

static int tx_policy_upload(struct cw1200_common *priv)
{
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	int i;
	struct wsm_set_tx_rate_retry_policy arg = {
		.num = 0,
	};
	spin_lock_bh(&cache->lock);

	/* Upload only modified entries. */
	for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
		struct tx_policy *src = &cache->cache[i].policy;
		if (src->retry_count && !src->uploaded) {
			struct wsm_tx_rate_retry_policy *dst =
				&arg.tbl[arg.num];
			dst->index = i;
			dst->short_retries = priv->short_frame_max_tx_count;
			dst->long_retries = priv->long_frame_max_tx_count;

			dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
				WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT;
			memcpy(dst->rate_count_indices, src->tbl,
			       sizeof(dst->rate_count_indices));
			src->uploaded = 1;
			++arg.num;
		}
	}
	spin_unlock_bh(&cache->lock);
	cw1200_debug_tx_cache_miss(priv);
	pr_debug("[TX policy] Upload %d policies\n", arg.num);
	return wsm_set_tx_rate_retry_policy(priv, &arg);
}

void tx_policy_upload_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, tx_policy_upload_work);

	pr_debug("[TX] TX policy upload.\n");
	tx_policy_upload(priv);

	wsm_unlock_tx(priv);
	cw1200_tx_queues_unlock(priv);
}

/* ******************************************************************** */
/* cw1200 TX implementation						*/

struct cw1200_txinfo {
	struct sk_buff *skb;
	unsigned queue;
	struct ieee80211_tx_info *tx_info;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	size_t hdrlen;
	const u8 *da;
	struct cw1200_sta_priv *sta_priv;
	struct ieee80211_sta *sta;
	struct cw1200_txpriv txpriv;
};

u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
{
	u32 ret = 0;
	int i;
	for (i = 0; i < 32; ++i) {
		if (rates & BIT(i))
			ret |= BIT(priv->rates[i].hw_value);
	}
	return ret;
}
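
/* Illustration (hypothetical numbering): if bit 0 of the mac80211 rate mask
 * denotes a bitrate whose hw_value is 2, the function above sets bit 2 in the
 * returned WSM mask, translating the mask bit by bit between the two index
 * spaces.
 */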

static const struct ieee80211_rate *
cw1200_get_tx_rate(const struct cw1200_common *priv,
		   const struct ieee80211_tx_rate *rate)
{
	if (rate->idx < 0)
		return NULL;
	if (rate->flags & IEEE80211_TX_RC_MCS)
		return &priv->mcs_rates[rate->idx];
	return &priv->hw->wiphy->bands[priv->channel->band]->
		bitrates[rate->idx];
}

static int
cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
			  struct cw1200_txinfo *t)
{
	if (t->sta && t->sta_priv->link_id)
		t->txpriv.raw_link_id =
				t->txpriv.link_id =
				t->sta_priv->link_id;
	else if (priv->mode != NL80211_IFTYPE_AP)
		t->txpriv.raw_link_id =
				t->txpriv.link_id = 0;
	else if (is_multicast_ether_addr(t->da)) {
		if (priv->enable_beacon) {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
		} else {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = 0;
		}
	} else {
		t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
		if (!t->txpriv.link_id)
			t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
		if (!t->txpriv.link_id) {
			wiphy_err(priv->hw->wiphy,
				  "No more link IDs available.\n");
			return -ENOENT;
		}
		t->txpriv.raw_link_id = t->txpriv.link_id;
	}
	if (t->txpriv.raw_link_id)
		priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
				jiffies;
	if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
		t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
	return 0;
}

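/* PS-state bookkeeping: an authentication exchange resets the peer's
 * power-save state, so transmitting an auth frame clears the corresponding
 * bits in the sta-asleep and PS-poll masks.
 */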
static void
cw1200_tx_h_pm(struct cw1200_common *priv,
	       struct cw1200_txinfo *t)
{
	if (ieee80211_is_auth(t->hdr->frame_control)) {
		u32 mask = ~BIT(t->txpriv.raw_link_id);
		spin_lock_bh(&priv->ps_state_lock);
		priv->sta_asleep_mask &= mask;
		priv->pspoll_mask &= mask;
		spin_unlock_bh(&priv->ps_state_lock);
	}
}

static void
cw1200_tx_h_calc_tid(struct cw1200_common *priv,
		     struct cw1200_txinfo *t)
{
	if (ieee80211_is_data_qos(t->hdr->frame_control)) {
		u8 *qos = ieee80211_get_qos_ctl(t->hdr);
		t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_data(t->hdr->frame_control)) {
		t->txpriv.tid = 0;
	}
}

static int
cw1200_tx_h_crypt(struct cw1200_common *priv,
		  struct cw1200_txinfo *t)
{
	if (!t->tx_info->control.hw_key ||
	    !ieee80211_has_protected(t->hdr->frame_control))
		return 0;

	t->hdrlen += t->tx_info->control.hw_key->iv_len;
	skb_put(t->skb, t->tx_info->control.hw_key->icv_len);

	if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		skb_put(t->skb, 8); /* MIC space */

	return 0;
}

static int
cw1200_tx_h_align(struct cw1200_common *priv,
		  struct cw1200_txinfo *t,
		  u8 *flags)
{
	size_t offset = (size_t)t->skb->data & 3;

	if (!offset)
		return 0;

	if (offset & 1) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: attempt to transmit a frame with wrong alignment: %zu\n",
			  offset);
		return -EINVAL;
	}

	if (skb_headroom(t->skb) < offset) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: no space allocated for DMA alignment. headroom: %d\n",
			  skb_headroom(t->skb));
		return -ENOMEM;
	}
	skb_push(t->skb, offset);
	t->hdrlen += offset;
	t->txpriv.offset += offset;
	*flags |= WSM_TX_2BYTES_SHIFT;
	cw1200_debug_tx_align(priv);
	return 0;
}

static int
cw1200_tx_h_action(struct cw1200_common *priv,
		   struct cw1200_txinfo *t)
{
	struct ieee80211_mgmt *mgmt =
		(struct ieee80211_mgmt *)t->hdr;
	if (ieee80211_is_action(t->hdr->frame_control) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK)
		return 1;
	else
		return 0;
}

/* Add WSM header */
static struct wsm_tx *
cw1200_tx_h_wsm(struct cw1200_common *priv,
		struct cw1200_txinfo *t)
{
	struct wsm_tx *wsm;

	if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: no space allocated for WSM header. headroom: %d\n",
			  skb_headroom(t->skb));
		return NULL;
	}

	wsm = skb_push(t->skb, sizeof(struct wsm_tx));
	t->txpriv.offset += sizeof(struct wsm_tx);
	memset(wsm, 0, sizeof(*wsm));
	wsm->hdr.len = __cpu_to_le16(t->skb->len);
	wsm->hdr.id = __cpu_to_le16(0x0004);
	wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
	return wsm;
}

/* BT Coex specific handling */
static void
cw1200_tx_h_bt(struct cw1200_common *priv,
	       struct cw1200_txinfo *t,
	       struct wsm_tx *wsm)
{
	u8 priority = 0;

	if (!priv->bt_present)
		return;

	if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
		priority = WSM_EPTA_PRIORITY_MGT;
	} else if (ieee80211_is_data(t->hdr->frame_control)) {
		/* Skip LLC SNAP header (+6) */
		u8 *payload = &t->skb->data[t->hdrlen];
		__be16 *ethertype = (__be16 *)&payload[6];
		if (be16_to_cpu(*ethertype) == ETH_P_PAE)
			priority = WSM_EPTA_PRIORITY_EAPOL;
	} else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
		ieee80211_is_reassoc_req(t->hdr->frame_control)) {
		struct ieee80211_mgmt *mgt_frame =
				(struct ieee80211_mgmt *)t->hdr;

		if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
						priv->listen_interval) {
			pr_debug("Modified Listen Interval to %d from %d\n",
				 priv->listen_interval,
				 mgt_frame->u.assoc_req.listen_interval);
			/* Replace the listen interval derived by mac80211
			 * with the one read from the SDD
			 */
			mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
		}
	}

	if (!priority) {
		if (ieee80211_is_action(t->hdr->frame_control))
			priority = WSM_EPTA_PRIORITY_ACTION;
		else if (ieee80211_is_mgmt(t->hdr->frame_control))
			priority = WSM_EPTA_PRIORITY_MGT;
		else if (wsm->queue_id == WSM_QUEUE_VOICE)
			priority = WSM_EPTA_PRIORITY_VOICE;
		else if (wsm->queue_id == WSM_QUEUE_VIDEO)
			priority = WSM_EPTA_PRIORITY_VIDEO;
		else
			priority = WSM_EPTA_PRIORITY_DATA;
	}

	pr_debug("[TX] EPTA priority %d.\n", priority);

	wsm->flags |= priority << 1;
}

static int
cw1200_tx_h_rate_policy(struct cw1200_common *priv,
			struct cw1200_txinfo *t,
			struct wsm_tx *wsm)
{
	bool tx_policy_renew = false;

	t->txpriv.rate_id = tx_policy_get(priv,
		t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
		&tx_policy_renew);
	if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
		return -EFAULT;

	wsm->flags |= t->txpriv.rate_id << 4;

	t->rate = cw1200_get_tx_rate(priv,
		&t->tx_info->control.rates[0]);
	wsm->max_tx_rate = t->rate->hw_value;
	if (t->rate->flags & IEEE80211_TX_RC_MCS) {
		if (cw1200_ht_greenfield(&priv->ht_info))
			wsm->ht_tx_parameters |=
				__cpu_to_le32(WSM_HT_TX_GREENFIELD);
		else
			wsm->ht_tx_parameters |=
				__cpu_to_le32(WSM_HT_TX_MIXED);
	}

	if (tx_policy_renew) {
		pr_debug("[TX] TX policy renew.\n");
		/* It's not so optimal to stop TX queues every now and then.
		 * Better to reimplement task scheduling with
		 * a counter. TODO.
		 */
		wsm_lock_tx_async(priv);
		cw1200_tx_queues_lock(priv);
		if (queue_work(priv->workqueue,
			       &priv->tx_policy_upload_work) <= 0) {
			cw1200_tx_queues_unlock(priv);
			wsm_unlock_tx(priv);
		}
	}
	return 0;
}

static bool
cw1200_tx_h_pm_state(struct cw1200_common *priv,
		     struct cw1200_txinfo *t)
{
	int was_buffered = 1;

	if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
	    !priv->buffered_multicasts) {
		priv->buffered_multicasts = true;
		if (priv->sta_asleep_mask)
			queue_work(priv->workqueue,
				   &priv->multicast_start_work);
	}

	if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
		was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;

	return !was_buffered;
}

/* ******************************************************************** */

void cw1200_tx(struct ieee80211_hw *dev,
	       struct ieee80211_tx_control *control,
	       struct sk_buff *skb)
{
	struct cw1200_common *priv = dev->priv;
	struct cw1200_txinfo t = {
		.skb = skb,
		.queue = skb_get_queue_mapping(skb),
		.tx_info = IEEE80211_SKB_CB(skb),
		.hdr = (struct ieee80211_hdr *)skb->data,
		.txpriv.tid = CW1200_MAX_TID,
		.txpriv.rate_id = CW1200_INVALID_RATE_ID,
	};
	struct ieee80211_sta *sta;
	struct wsm_tx *wsm;
	bool tid_update = false;
	u8 flags = 0;
	int ret;

	if (priv->bh_error)
		goto drop;

	t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
	t.da = ieee80211_get_DA(t.hdr);
	if (control) {
		t.sta = control->sta;
		t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
	}

	if (WARN_ON(t.queue >= 4))
		goto drop;

	ret = cw1200_tx_h_calc_link_ids(priv, &t);
	if (ret)
		goto drop;

	pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
		 skb->len, t.queue, t.txpriv.link_id,
		 t.txpriv.raw_link_id);

	cw1200_tx_h_pm(priv, &t);
	cw1200_tx_h_calc_tid(priv, &t);
	ret = cw1200_tx_h_crypt(priv, &t);
	if (ret)
		goto drop;
	ret = cw1200_tx_h_align(priv, &t, &flags);
	if (ret)
		goto drop;
	ret = cw1200_tx_h_action(priv, &t);
	if (ret)
		goto drop;
	wsm = cw1200_tx_h_wsm(priv, &t);
	if (!wsm) {
		ret = -ENOMEM;
		goto drop;
	}
	wsm->flags |= flags;
	cw1200_tx_h_bt(priv, &t, wsm);
	ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
	if (ret)
		goto drop;

	rcu_read_lock();
	sta = rcu_dereference(t.sta);

	spin_lock_bh(&priv->ps_state_lock);
	{
		tid_update = cw1200_tx_h_pm_state(priv, &t);
		BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
					t.skb, &t.txpriv));
	}
	spin_unlock_bh(&priv->ps_state_lock);

	if (tid_update && sta)
		ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);

	rcu_read_unlock();

	cw1200_bh_wakeup(priv);

	return;

drop:
	cw1200_skb_dtor(priv, skb, &t.txpriv);
	return;
}
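
/* The TX entry point above is a straight pipeline: resolve the link id,
 * update PM and TID state, reserve crypto tail room, align the payload for
 * DMA, prepend the WSM header, attach a rate policy, then enqueue the frame
 * on its per-AC queue and kick the bottom half.
 */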

/* ******************************************************************** */

static int cw1200_handle_action_rx(struct cw1200_common *priv,
				   struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;

	/* Filter block ACK negotiation: fully controlled by firmware */
	if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
		return 1;

	return 0;
}

static int cw1200_handle_pspoll(struct cw1200_common *priv,
				struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
	int link_id = 0;
	u32 pspoll_mask = 0;
	int drop = 1;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		goto done;
	if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
		goto done;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, pspoll->ta);
	if (sta) {
		struct cw1200_sta_priv *sta_priv;
		sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
		link_id = sta_priv->link_id;
		pspoll_mask = BIT(sta_priv->link_id);
	}
	rcu_read_unlock();
	if (!link_id)
		goto done;

	priv->pspoll_mask |= pspoll_mask;
	drop = 0;

	/* Do not report PS-Polls if data for the given link id is already queued. */
	for (i = 0; i < 4; ++i) {
		if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
						pspoll_mask)) {
			cw1200_bh_wakeup(priv);
			drop = 1;
			break;
		}
	}
	pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
done:
	return drop;
}

/* ******************************************************************** */

void cw1200_tx_confirm_cb(struct cw1200_common *priv,
			  int link_id,
			  struct wsm_tx_confirm *arg)
{
	u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
	struct sk_buff *skb;
	const struct cw1200_txpriv *txpriv;

	pr_debug("[TX] TX confirm: %d, %d.\n",
		 arg->status, arg->ack_failures);

	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
		/* STA is stopped. */
		return;
	}

	if (WARN_ON(queue_id >= 4))
		return;

	if (arg->status)
		pr_debug("TX failed: %d.\n", arg->status);

	if ((arg->status == WSM_REQUEUE) &&
	    (arg->flags & WSM_TX_STATUS_REQUEUE)) {
		/* "Requeue" means "implicit suspend" */
		struct wsm_suspend_resume suspend = {
			.link_id = link_id,
			.stop = 1,
			.multicast = !link_id,
		};
		cw1200_suspend_resume(priv, &suspend);
		wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
			   link_id,
			   cw1200_queue_get_generation(arg->packet_id) + 1,
			   priv->sta_asleep_mask);
		cw1200_queue_requeue(queue, arg->packet_id);
		spin_lock_bh(&priv->ps_state_lock);
		if (!link_id) {
			priv->buffered_multicasts = true;
			if (priv->sta_asleep_mask) {
				queue_work(priv->workqueue,
					   &priv->multicast_start_work);
			}
		}
		spin_unlock_bh(&priv->ps_state_lock);
	} else if (!cw1200_queue_get_skb(queue, arg->packet_id,
					 &skb, &txpriv)) {
		struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
		int tx_count = arg->ack_failures;
		u8 ht_flags = 0;
		int i;

		if (cw1200_ht_greenfield(&priv->ht_info))
			ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;

		spin_lock(&priv->bss_loss_lock);
		if (priv->bss_loss_state &&
		    arg->packet_id == priv->bss_loss_confirm_id) {
			if (arg->status) {
				/* Recovery failed */
				__cw1200_cqm_bssloss_sm(priv, 0, 0, 1);
			} else {
				/* Recovery succeeded */
				__cw1200_cqm_bssloss_sm(priv, 0, 1, 0);
			}
		}
		spin_unlock(&priv->bss_loss_lock);

		if (!arg->status) {
			tx->flags |= IEEE80211_TX_STAT_ACK;
			++tx_count;
			cw1200_debug_txed(priv);
			if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
				/* Do not report aggregation to mac80211:
				 * it confuses minstrel a lot.
				 */
				/* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
				cw1200_debug_txed_agg(priv);
			}
		} else {
			if (tx_count)
				++tx_count;
		}

		for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
			if (tx->status.rates[i].count >= tx_count) {
				tx->status.rates[i].count = tx_count;
				break;
			}
			tx_count -= tx->status.rates[i].count;
			if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
				tx->status.rates[i].flags |= ht_flags;
		}

		for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
			tx->status.rates[i].count = 0;
			tx->status.rates[i].idx = -1;
		}

		/* Pull off any crypto trailers that we added on */
		if (tx->control.hw_key) {
			skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
			if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
				skb_trim(skb, skb->len - 8); /* MIC space */
		}
		cw1200_queue_remove(queue, arg->packet_id);
	}
	/* XXX TODO:  Only wake if there are pending transmits.. */
	cw1200_bh_wakeup(priv);
}
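
/* Worked example of the retry accounting above (hypothetical numbers): with
 * ack_failures = 3 and a successful ACK, tx_count becomes 4. If the policy
 * attempted rate[0] three times, rate[0].count stays 3, the remaining attempt
 * is charged to rate[1] (assuming mac80211 reported at least one attempt
 * there), and all later rates are marked unused (idx = -1).
 */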

static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
			       struct sk_buff *skb, int link_id, int tid)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	u8 *buffered;
	u8 still_buffered = 0;

	if (link_id && tid < CW1200_MAX_TID) {
		buffered = priv->link_id_db
				[link_id - 1].buffered;

		spin_lock_bh(&priv->ps_state_lock);
		if (!WARN_ON(!buffered[tid]))
			still_buffered = --buffered[tid];
		spin_unlock_bh(&priv->ps_state_lock);

		if (!still_buffered && tid < CW1200_MAX_TID) {
			hdr = (struct ieee80211_hdr *)skb->data;
			rcu_read_lock();
			sta = ieee80211_find_sta(priv->vif, hdr->addr1);
			if (sta)
				ieee80211_sta_set_buffered(sta, tid, false);
			rcu_read_unlock();
		}
	}
}

void cw1200_skb_dtor(struct cw1200_common *priv,
		     struct sk_buff *skb,
		     const struct cw1200_txpriv *txpriv)
{
	skb_pull(skb, txpriv->offset);
	if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
		cw1200_notify_buffered_tx(priv, skb,
					  txpriv->raw_link_id, txpriv->tid);
		tx_policy_put(priv, txpriv->rate_id);
	}
	ieee80211_tx_status(priv->hw, skb);
}

void cw1200_rx_cb(struct cw1200_common *priv,
		  struct wsm_rx *arg,
		  int link_id,
		  struct sk_buff **skb_p)
{
	struct sk_buff *skb = *skb_p;
	struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct cw1200_link_entry *entry = NULL;
	unsigned long grace_period;

	bool early_data = false;
	bool p2p = priv->vif && priv->vif->p2p;
	size_t hdrlen;
	hdr->flag = 0;

	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
		/* STA is stopped. */
		goto drop;
	}

	if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) {
		entry = &priv->link_id_db[link_id - 1];
		if (entry->status == CW1200_LINK_SOFT &&
		    ieee80211_is_data(frame->frame_control))
			early_data = true;
		entry->timestamp = jiffies;
	} else if (p2p &&
		   ieee80211_is_action(frame->frame_control) &&
		   (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
		pr_debug("[RX] Going to MAP&RESET link ID\n");
		WARN_ON(work_pending(&priv->linkid_reset_work));
		memcpy(&priv->action_frame_sa[0],
		       ieee80211_get_SA(frame), ETH_ALEN);
		priv->action_linkid = 0;
		schedule_work(&priv->linkid_reset_work);
	}

	if (link_id && p2p &&
	    ieee80211_is_action(frame->frame_control) &&
	    (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
		/* Link ID already exists for the ACTION frame.
		 * Reset and Remap
		 */
		WARN_ON(work_pending(&priv->linkid_reset_work));
		memcpy(&priv->action_frame_sa[0],
		       ieee80211_get_SA(frame), ETH_ALEN);
		priv->action_linkid = link_id;
		schedule_work(&priv->linkid_reset_work);
	}
	if (arg->status) {
		if (arg->status == WSM_STATUS_MICFAILURE) {
			pr_debug("[RX] MIC failure.\n");
			hdr->flag |= RX_FLAG_MMIC_ERROR;
		} else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
			pr_debug("[RX] No key found.\n");
			goto drop;
		} else {
			pr_debug("[RX] Receive failure: %d.\n",
				 arg->status);
			goto drop;
		}
	}

	if (skb->len < sizeof(struct ieee80211_pspoll)) {
		wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than IEEE header.\n");
		goto drop;
	}

	if (ieee80211_is_pspoll(frame->frame_control))
		if (cw1200_handle_pspoll(priv, skb))
			goto drop;

	hdr->band = ((arg->channel_number & 0xff00) ||
		     (arg->channel_number > 14)) ?
			NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
	hdr->freq = ieee80211_channel_to_frequency(
			arg->channel_number,
			hdr->band);

	if (arg->rx_rate >= 14) {
		hdr->encoding = RX_ENC_HT;
		hdr->rate_idx = arg->rx_rate - 14;
	} else if (arg->rx_rate >= 4) {
		hdr->rate_idx = arg->rx_rate - 2;
	} else {
		hdr->rate_idx = arg->rx_rate;
	}

	hdr->signal = (s8)arg->rcpi_rssi;
	hdr->antenna = 0;

	hdrlen = ieee80211_hdrlen(frame->frame_control);

	if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
		size_t iv_len = 0, icv_len = 0;

		hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;

		/* Oops... There is no fast way to ask mac80211 about
		 * IV/ICV lengths. Even the defines are not exposed.
		 */
		switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
		case WSM_RX_STATUS_WEP:
			iv_len = 4 /* WEP_IV_LEN */;
			icv_len = 4 /* WEP_ICV_LEN */;
			break;
		case WSM_RX_STATUS_TKIP:
			iv_len = 8 /* TKIP_IV_LEN */;
			icv_len = 4 /* TKIP_ICV_LEN */
				+ 8 /*MICHAEL_MIC_LEN*/;
			hdr->flag |= RX_FLAG_MMIC_STRIPPED;
			break;
		case WSM_RX_STATUS_AES:
			iv_len = 8 /* CCMP_HDR_LEN */;
			icv_len = 8 /* CCMP_MIC_LEN */;
			break;
		case WSM_RX_STATUS_WAPI:
			iv_len = 18 /* WAPI_HDR_LEN */;
			icv_len = 16 /* WAPI_MIC_LEN */;
			break;
		default:
			pr_warn("Unknown encryption type %d\n",
				WSM_RX_STATUS_ENCRYPTION(arg->flags));
			goto drop;
		}

		/* Firmware strips ICV in case of MIC failure. */
		if (arg->status == WSM_STATUS_MICFAILURE)
			icv_len = 0;

		if (skb->len < hdrlen + iv_len + icv_len) {
			wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than crypto headers.\n");
			goto drop;
		}

		/* Remove IV, ICV and MIC */
		skb_trim(skb, skb->len - icv_len);
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);
	}

	/* Remove TSF from the end of frame */
	if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
		memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
		hdr->mactime = le64_to_cpu(hdr->mactime);
		if (skb->len >= 8)
			skb_trim(skb, skb->len - 8);
	} else {
		hdr->mactime = 0;
	}

	cw1200_debug_rxed(priv);
	if (arg->flags & WSM_RX_STATUS_AGGREGATE)
		cw1200_debug_rxed_agg(priv);

	if (ieee80211_is_action(frame->frame_control) &&
	    (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
		if (cw1200_handle_action_rx(priv, skb))
			return;
	} else if (ieee80211_is_beacon(frame->frame_control) &&
		   !arg->status && priv->vif &&
		   ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
		const u8 *tim_ie;
		u8 *ies = ((struct ieee80211_mgmt *)
			  (skb->data))->u.beacon.variable;
		size_t ies_len = skb->len - (ies - (u8 *)(skb->data));

		tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
		if (tim_ie) {
			struct ieee80211_tim_ie *tim =
				(struct ieee80211_tim_ie *)&tim_ie[2];

			if (priv->join_dtim_period != tim->dtim_period) {
				priv->join_dtim_period = tim->dtim_period;
				queue_work(priv->workqueue,
					   &priv->set_beacon_wakeup_period_work);
			}
		}

		/* Disable beacon filter once we're associated... */
		if (priv->disable_beacon_filter &&
		    (priv->vif->bss_conf.assoc ||
		     priv->vif->bss_conf.ibss_joined)) {
			priv->disable_beacon_filter = false;
			queue_work(priv->workqueue,
				   &priv->update_filtering_work);
		}
	}

	/* Stay awake after the frame is received to give
	 * userspace a chance to react and acquire an appropriate
	 * wakelock.
	 */
	if (ieee80211_is_auth(frame->frame_control))
		grace_period = 5 * HZ;
	else if (ieee80211_is_deauth(frame->frame_control))
		grace_period = 5 * HZ;
	else
		grace_period = 1 * HZ;
	cw1200_pm_stay_awake(&priv->pm_state, grace_period);

	if (early_data) {
		spin_lock_bh(&priv->ps_state_lock);
		/* Double-check the status with the lock held */
		if (entry->status == CW1200_LINK_SOFT)
			skb_queue_tail(&entry->rx_queue, skb);
		else
			ieee80211_rx_irqsafe(priv->hw, skb);
		spin_unlock_bh(&priv->ps_state_lock);
	} else {
		ieee80211_rx_irqsafe(priv->hw, skb);
	}
	*skb_p = NULL;

	return;

drop:
	/* TODO: update failure counters */
	return;
}

/* ******************************************************************** */
/* Security								*/

int cw1200_alloc_key(struct cw1200_common *priv)
{
	int idx;

	idx = ffs(~priv->key_map) - 1;
	if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
		return -1;

	priv->key_map |= BIT(idx);
	priv->keys[idx].index = idx;
	return idx;
}

void cw1200_free_key(struct cw1200_common *priv, int idx)
{
	BUG_ON(!(priv->key_map & BIT(idx)));
	memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
	priv->key_map &= ~BIT(idx);
}

void cw1200_free_keys(struct cw1200_common *priv)
{
	memset(&priv->keys, 0, sizeof(priv->keys));
	priv->key_map = 0;
}

int cw1200_upload_keys(struct cw1200_common *priv)
{
	int idx, ret = 0;
	for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
		if (priv->key_map & BIT(idx)) {
			ret = wsm_add_key(priv, &priv->keys[idx]);
			if (ret < 0)
				break;
		}
	return ret;
}
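
/* Typical flow (sketch): cw1200_alloc_key() reserves the lowest free slot in
 * key_map, the caller fills priv->keys[idx] with WSM key material, and
 * cw1200_upload_keys() replays every allocated slot to the firmware, e.g.
 * after a device restart.
 */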

/* Workaround for WFD test case 6.1.10 */
void cw1200_link_id_reset(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, linkid_reset_work);
	int temp_linkid;

	if (!priv->action_linkid) {
		/* In GO mode we can receive ACTION frames without a linkID */
		temp_linkid = cw1200_alloc_link_id(priv,
				&priv->action_frame_sa[0]);
		WARN_ON(!temp_linkid);
		if (temp_linkid) {
			/* Make sure we execute the WQ */
			flush_workqueue(priv->workqueue);
			/* Release the link ID */
			spin_lock_bh(&priv->ps_state_lock);
			priv->link_id_db[temp_linkid - 1].prev_status =
				priv->link_id_db[temp_linkid - 1].status;
			priv->link_id_db[temp_linkid - 1].status =
				CW1200_LINK_RESET;
			spin_unlock_bh(&priv->ps_state_lock);
			wsm_lock_tx_async(priv);
			if (queue_work(priv->workqueue,
				       &priv->link_id_work) <= 0)
				wsm_unlock_tx(priv);
		}
	} else {
		spin_lock_bh(&priv->ps_state_lock);
		priv->link_id_db[priv->action_linkid - 1].prev_status =
			priv->link_id_db[priv->action_linkid - 1].status;
		priv->link_id_db[priv->action_linkid - 1].status =
			CW1200_LINK_RESET_REMAP;
		spin_unlock_bh(&priv->ps_state_lock);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
		flush_workqueue(priv->workqueue);
	}
}

int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int i, ret = 0;
	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
		    priv->link_id_db[i].status) {
			priv->link_id_db[i].timestamp = jiffies;
			ret = i + 1;
			break;
		}
	}
	spin_unlock_bh(&priv->ps_state_lock);
	return ret;
}

int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int i, ret = 0;
	unsigned long max_inactivity = 0;
	unsigned long now = jiffies;

	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (!priv->link_id_db[i].status) {
			ret = i + 1;
			break;
		} else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
			   !priv->tx_queue_stats.link_map_cache[i + 1]) {
			unsigned long inactivity =
				now - priv->link_id_db[i].timestamp;
			if (inactivity < max_inactivity)
				continue;
			max_inactivity = inactivity;
			ret = i + 1;
		}
	}
	if (ret) {
		struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
		pr_debug("[AP] STA added, link_id: %d\n", ret);
		entry->status = CW1200_LINK_RESERVE;
		memcpy(&entry->mac, mac, ETH_ALEN);
		memset(&entry->buffered, 0, CW1200_MAX_TID);
		skb_queue_head_init(&entry->rx_queue);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
	} else {
		wiphy_info(priv->hw->wiphy,
			   "[AP] Early: no more link IDs available.\n");
	}

	spin_unlock_bh(&priv->ps_state_lock);
	return ret;
}

void cw1200_link_id_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_work);
	wsm_flush_tx(priv);
	cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
	wsm_unlock_tx(priv);
}

void cw1200_link_id_gc_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_gc_work.work);
	struct wsm_reset reset = {
		.reset_statistics = false,
	};
	struct wsm_map_link map_link = {
		.link_id = 0,
	};
	unsigned long now = jiffies;
	unsigned long next_gc = -1;
	long ttl;
	bool need_reset;
	u32 mask;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		return;

	wsm_lock_tx(priv);
	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		need_reset = false;
		mask = BIT(i + 1);
		if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
		    (priv->link_id_db[i].status == CW1200_LINK_HARD &&
		     !(priv->link_id_map & mask))) {
			if (priv->link_id_map & mask) {
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				need_reset = true;
			}
			priv->link_id_map |= mask;
			if (priv->link_id_db[i].status != CW1200_LINK_HARD)
				priv->link_id_db[i].status = CW1200_LINK_SOFT;
			memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
			       ETH_ALEN);
			spin_unlock_bh(&priv->ps_state_lock);
			if (need_reset) {
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
			}
			map_link.link_id = i + 1;
			wsm_map_link(priv, &map_link);
			next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
			spin_lock_bh(&priv->ps_state_lock);
		} else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
			ttl = priv->link_id_db[i].timestamp - now +
					CW1200_LINK_ID_GC_TIMEOUT;
			if (ttl <= 0) {
				need_reset = true;
				priv->link_id_db[i].status = CW1200_LINK_OFF;
				priv->link_id_map &= ~mask;
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				eth_zero_addr(map_link.mac_addr);
				spin_unlock_bh(&priv->ps_state_lock);
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
				spin_lock_bh(&priv->ps_state_lock);
			} else {
				next_gc = min_t(unsigned long, next_gc, ttl);
			}
		} else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
				priv->link_id_db[i].status ==
				CW1200_LINK_RESET_REMAP) {
			int status = priv->link_id_db[i].status;
			priv->link_id_db[i].status =
					priv->link_id_db[i].prev_status;
			priv->link_id_db[i].timestamp = now;
			reset.link_id = i + 1;
			spin_unlock_bh(&priv->ps_state_lock);
			wsm_reset(priv, &reset);
			if (status == CW1200_LINK_RESET_REMAP) {
				memcpy(map_link.mac_addr,
				       priv->link_id_db[i].mac,
				       ETH_ALEN);
				map_link.link_id = i + 1;
				wsm_map_link(priv, &map_link);
				next_gc = min(next_gc,
						CW1200_LINK_ID_GC_TIMEOUT);
			}
			spin_lock_bh(&priv->ps_state_lock);
		}
		if (need_reset) {
			skb_queue_purge(&priv->link_id_db[i].rx_queue);
			pr_debug("[AP] STA removed, link_id: %d\n",
				 reset.link_id);
		}
	}
	spin_unlock_bh(&priv->ps_state_lock);
	if (next_gc != -1)
		queue_delayed_work(priv->workqueue,
				   &priv->link_id_gc_work, next_gc);
	wsm_unlock_tx(priv);
}
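
/* Link-id lifecycle implemented above (summary): RESERVE -> SOFT once the
 * entry is mapped to firmware; SOFT -> OFF (with a wsm_reset()) after
 * CW1200_LINK_ID_GC_TIMEOUT of inactivity; RESET/RESET_REMAP force a firmware
 * reset and, for REMAP, a re-map before restoring the previous status. The
 * delayed work re-arms itself with the nearest pending expiry.
 */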