cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

4965-mac.c (188480B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/******************************************************************************
      3 *
      4 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
      5 *
      6 * Portions of this file are derived from the ipw3945 project, as well
      7 * as portions of the ieee80211 subsystem header files.
      8 *
      9 * Contact Information:
     10 *  Intel Linux Wireless <ilw@linux.intel.com>
     11 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     12 *
     13 *****************************************************************************/
     14
     15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     16
     17#include <linux/kernel.h>
     18#include <linux/module.h>
     19#include <linux/init.h>
     20#include <linux/pci.h>
     21#include <linux/slab.h>
     22#include <linux/dma-mapping.h>
     23#include <linux/delay.h>
     24#include <linux/sched.h>
     25#include <linux/skbuff.h>
     26#include <linux/netdevice.h>
     27#include <linux/firmware.h>
     28#include <linux/etherdevice.h>
     29#include <linux/if_arp.h>
     30#include <linux/units.h>
     31
     32#include <net/mac80211.h>
     33
     34#include <asm/div64.h>
     35
     36#define DRV_NAME        "iwl4965"
     37
     38#include "common.h"
     39#include "4965.h"
     40
/******************************************************************************
 *
 * module boilerplate
 *
 ******************************************************************************/
     46
/*
 * Module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"

/* Append "d" to the version string when debug support is compiled in */
#ifdef CONFIG_IWLEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
     65
     66void
     67il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
     68{
     69	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
     70		IL_ERR("Tx flush command to flush out all frames\n");
     71		if (!test_bit(S_EXIT_PENDING, &il->status))
     72			queue_work(il->workqueue, &il->tx_flush);
     73	}
     74}
     75
/*
 * EEPROM
 */
/* Module load parameters: only automatic firmware restart is enabled
 * by default; everything else stays zero. */
struct il_mod_params il4965_mod_params = {
	.restart_fw = 1,
	/* the rest are 0 by default */
};
     83
/*
 * il4965_rx_queue_reset - return every Rx buffer to the pool, zero indexes
 *
 * Frees any pages still attached to pool entries (after unmapping their
 * DMA), moves all pool entries back onto rx_used, clears the RBD queue
 * pointers, and resets the read/write indexes.  Runs under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(&il->pci_dev->dev,
				       rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       DMA_FROM_DEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
    117
/*
 * il4965_rx_init - program the Rx DMA channel
 *
 * Stops Rx DMA, points the device at the RBD circular buffer and the Rx
 * status write-back area, then re-enables DMA with the configured buffer
 * size (8K when the amsdu_size_8K module parameter is set, 4K otherwise).
 * Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	u32 rb_timeout = 0;

	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM;
	 * the register takes bits 35:8 of the DMA address */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
    161
/* Select V_MAIN as the device power source (the comment inside shows how
 * V_AUX would be selected instead). */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(il->pci_dev, PCI_D3cold))
			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
    179
/*
 * il4965_hw_nic_init - bring up APM, power source, and Rx/Tx queues
 *
 * Allocates the Rx queue and the Tx/command queue context on first call;
 * on subsequent calls resets the already-allocated queues instead.
 * Returns 0 on success or a negative errno if queue allocation fails.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);
	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	/* Fill the queue with mapped pages before enabling Rx DMA */
	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
    229
    230/*
    231 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
    232 */
    233static inline __le32
    234il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
    235{
    236	return cpu_to_le32((u32) (dma_addr >> 8));
    237}
    238
/*
 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it.
	 * NOTE(review): free_count is read here without rxq->lock; this is
	 * only a low-water heuristic, an occasional stale read is harmless. */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
    291
/*
 * il4965_rx_allocate - attach freshly allocated pages to rx_used entries
 *
 * For each entry on rx_used: allocate a receive page at the given GFP
 * @priority, DMA-map it, attach it to the entry, and move the entry to
 * rx_free.  Stops when rx_used is empty, page allocation fails, or DMA
 * mapping fails.  Called from il4965_rx_replenish() (GFP_KERNEL) and
 * il4965_rx_replenish_now() (GFP_ATOMIC).  Pages are allocated outside
 * rxq->lock; the list is re-checked after reacquiring the lock.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation-failure warnings unless we are
		 * actually running low on buffers */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		/* Get physical address of the RB */
		page_dma = dma_map_page(&il->pci_dev->dev, page, 0,
					PAGE_SIZE << il->hw_params.rx_page_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Someone may have drained rx_used while we allocated;
		 * undo the mapping and bail out */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			dma_unmap_page(&il->pci_dev->dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
    380
/*
 * il4965_rx_replenish - allocate fresh Rx pages and restock the queue
 *
 * Process-context replenish (GFP_KERNEL, may sleep in the allocator);
 * restocking runs under il->lock.  Scheduled as the rx_replenish work
 * item and also called directly during initialization.
 */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
    392
/*
 * il4965_rx_replenish_now - atomic-context Rx replenish
 *
 * Like il4965_rx_replenish() but allocates with GFP_ATOMIC and restocks
 * without taking il->lock, so it is usable from the Rx handling path.
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
    400
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed.
 *
 * Also releases the RBD ring (rxq->bd) and the Rx status write-back area
 * (rxq->rb_stts) that were allocated with dma_alloc_coherent().
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(&il->pci_dev->dev,
				       rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       DMA_FROM_DEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
    428
    429int
    430il4965_rxq_stop(struct il_priv *il)
    431{
    432	int ret;
    433
    434	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
    435	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
    436			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
    437			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
    438			   1000);
    439	if (ret < 0)
    440		IL_ERR("Can't stop Rx DMA.\n");
    441
    442	return 0;
    443}
    444
    445int
    446il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
    447{
    448	int idx = 0;
    449	int band_offset = 0;
    450
    451	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
    452	if (rate_n_flags & RATE_MCS_HT_MSK) {
    453		idx = (rate_n_flags & 0xff);
    454		return idx;
    455		/* Legacy rate format, search for match in table */
    456	} else {
    457		if (band == NL80211_BAND_5GHZ)
    458			band_offset = IL_FIRST_OFDM_RATE;
    459		for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
    460			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
    461				return idx - band_offset;
    462	}
    463
    464	return -1;
    465}
    466
/*
 * il4965_calc_rssi - derive received signal strength (dBm) from PHY data
 *
 * Picks the strongest DSP RSSI reading among the valid antennas, then
 * subtracts the AGC gain and a fixed calibration offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 *   contents are always there, not configurable by host.  */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 *   if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info.
	 * rssi_info holds per-antenna values at even indexes (i << 1). */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
    501
/*
 * il4965_translate_rx_status - normalize MPDU decryption status bits
 *
 * Translates the decryption status reported for N_RX_MPDU frames into
 * the N_RX status-bit layout, so the rest of the Rx path can interpret
 * a single format.  Returns early for unencrypted packets, unknown
 * algorithms, and packets not decrypted in hardware.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* TKIP with a good TTAK falls through to the ICV check */
		fallthrough;	/* if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x  decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
    561
    562#define SMALL_PACKET_SIZE 256
    563
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Frames up to SMALL_PACKET_SIZE are copied into a fresh skb; larger
 * frames attach the Rx page as a fragment, transferring page ownership
 * to the skb.  Drops the frame if the interface is closed or hardware
 * decryption failed.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (len <= SMALL_PACKET_SIZE) {
		skb_put_data(skb, hdr, len);
	} else {
		/* Attach the Rx page; the skb now owns it */
		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
				len, PAGE_SIZE << il->hw_params.rx_page_order);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
}
    608
/* Called for N_RX (legacy ABG frames), or
 * N_RX_MPDU (HT high-throughput N frames).
 *
 * Parses the PHY result (inline for N_RX, cached for N_RX_MPDU), fills
 * an ieee80211_rx_status from it, and forwards the frame to mac80211
 * via il4965_pass_packet_to_mac80211().  Drops frames with out-of-range
 * DSP config counts or CRC/FIFO-overflow errors.
 */
static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/*
	 * N_RX and N_RX_MPDU are handled differently.
	 *	N_RX: physical layer info is in this buffer
	 *	N_RX_MPDU: physical layer info was sent in separate
	 *		command and cached in il->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* Rx status word sits immediately after the frame data */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d\n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
	    NL80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_START; */

	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.encoding = RX_ENC_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.bw = RATE_INFO_BW_40;
	else
		rx_status.bw = RATE_INFO_BW_20;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/* We know which subframes of an A-MPDU belong
		 * together since we get a single PHY response
		 * from the firmware for all of them.
		 */

		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = il->_4965.ampdu_ref;
	}

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
    742
/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
static void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	/* Mark the cache valid and bump the A-MPDU reference so the
	 * MPDU frames that follow can be grouped with this PHY result */
	il->_4965.last_phy_res_valid = true;
	il->_4965.ampdu_ref++;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
    754
/*
 * il4965_get_channels_for_scan - fill scan-command channel entries
 *
 * Walks the channels of the pending cfg80211 scan request that belong
 * to @band, skips channels the EEPROM marks invalid, and writes one
 * il_scan_channel entry (type, dwell times, Tx gain) per usable channel
 * into @scan_ch.  Returns the number of entries written.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum nl80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* No-IR channels must be scanned passively */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
    833
    834static void
    835il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
    836{
    837	int i;
    838	u8 ind = *ant;
    839
    840	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
    841		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
    842		if (valid & BIT(ind)) {
    843			*ant = ind;
    844			return;
    845		}
    846	}
    847}
    848
    849int
    850il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
    851{
    852	struct il_host_cmd cmd = {
    853		.id = C_SCAN,
    854		.len = sizeof(struct il_scan_cmd),
    855		.flags = CMD_SIZE_HUGE,
    856	};
    857	struct il_scan_cmd *scan;
    858	u32 rate_flags = 0;
    859	u16 cmd_len;
    860	u16 rx_chain = 0;
    861	enum nl80211_band band;
    862	u8 n_probes = 0;
    863	u8 rx_ant = il->hw_params.valid_rx_ant;
    864	u8 rate;
    865	bool is_active = false;
    866	int chan_mod;
    867	u8 active_chains;
    868	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
    869	int ret;
    870
    871	lockdep_assert_held(&il->mutex);
    872
    873	if (!il->scan_cmd) {
    874		il->scan_cmd =
    875		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
    876			    GFP_KERNEL);
    877		if (!il->scan_cmd) {
    878			D_SCAN("fail to allocate memory for scan\n");
    879			return -ENOMEM;
    880		}
    881	}
    882	scan = il->scan_cmd;
    883	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
    884
    885	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
    886	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
    887
    888	if (il_is_any_associated(il)) {
    889		u16 interval;
    890		u32 extra;
    891		u32 suspend_time = 100;
    892		u32 scan_suspend_time = 100;
    893
    894		D_INFO("Scanning while associated...\n");
    895		interval = vif->bss_conf.beacon_int;
    896
    897		scan->suspend_time = 0;
    898		scan->max_out_time = cpu_to_le32(200 * 1024);
    899		if (!interval)
    900			interval = suspend_time;
    901
    902		extra = (suspend_time / interval) << 22;
    903		scan_suspend_time =
    904		    (extra | ((suspend_time % interval) * 1024));
    905		scan->suspend_time = cpu_to_le32(scan_suspend_time);
    906		D_SCAN("suspend_time 0x%X beacon interval %d\n",
    907		       scan_suspend_time, interval);
    908	}
    909
    910	if (il->scan_request->n_ssids) {
    911		int i, p = 0;
    912		D_SCAN("Kicking off active scan\n");
    913		for (i = 0; i < il->scan_request->n_ssids; i++) {
    914			/* always does wildcard anyway */
    915			if (!il->scan_request->ssids[i].ssid_len)
    916				continue;
    917			scan->direct_scan[p].id = WLAN_EID_SSID;
    918			scan->direct_scan[p].len =
    919			    il->scan_request->ssids[i].ssid_len;
    920			memcpy(scan->direct_scan[p].ssid,
    921			       il->scan_request->ssids[i].ssid,
    922			       il->scan_request->ssids[i].ssid_len);
    923			n_probes++;
    924			p++;
    925		}
    926		is_active = true;
    927	} else
    928		D_SCAN("Start passive scan.\n");
    929
    930	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
    931	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
    932	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
    933
    934	switch (il->scan_band) {
    935	case NL80211_BAND_2GHZ:
    936		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
    937		chan_mod =
    938		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
    939		    RXON_FLG_CHANNEL_MODE_POS;
    940		if (chan_mod == CHANNEL_MODE_PURE_40) {
    941			rate = RATE_6M_PLCP;
    942		} else {
    943			rate = RATE_1M_PLCP;
    944			rate_flags = RATE_MCS_CCK_MSK;
    945		}
    946		break;
    947	case NL80211_BAND_5GHZ:
    948		rate = RATE_6M_PLCP;
    949		break;
    950	default:
    951		IL_WARN("Invalid scan band\n");
    952		return -EIO;
    953	}
    954
    955	/*
    956	 * If active scanning is requested but a certain channel is
    957	 * marked passive, we can do active scanning if we detect
    958	 * transmissions.
    959	 *
    960	 * There is an issue with some firmware versions that triggers
    961	 * a sysassert on a "good CRC threshold" of zero (== disabled),
    962	 * on a radar channel even though this means that we should NOT
    963	 * send probes.
    964	 *
    965	 * The "good CRC threshold" is the number of frames that we
    966	 * need to receive during our dwell time on a channel before
    967	 * sending out probes -- setting this to a huge value will
    968	 * mean we never reach it, but at the same time work around
    969	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
    970	 * here instead of IL_GOOD_CRC_TH_DISABLED.
    971	 */
    972	scan->good_CRC_th =
    973	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
    974
    975	band = il->scan_band;
    976
    977	if (il->cfg->scan_rx_antennas[band])
    978		rx_ant = il->cfg->scan_rx_antennas[band];
    979
    980	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
    981	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
    982	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
    983
    984	/* In power save mode use one chain, otherwise use all chains */
    985	if (test_bit(S_POWER_PMI, &il->status)) {
    986		/* rx_ant has been set to all valid chains previously */
    987		active_chains =
    988		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
    989		if (!active_chains)
    990			active_chains = rx_ant;
    991
    992		D_SCAN("chain_noise_data.active_chains: %u\n",
    993		       il->chain_noise_data.active_chains);
    994
    995		rx_ant = il4965_first_antenna(active_chains);
    996	}
    997
    998	/* MIMO is not used here, but value is required */
    999	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
   1000	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
   1001	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
   1002	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
   1003	scan->rx_chain = cpu_to_le16(rx_chain);
   1004
   1005	cmd_len =
   1006	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
   1007			      vif->addr, il->scan_request->ie,
   1008			      il->scan_request->ie_len,
   1009			      IL_MAX_SCAN_SIZE - sizeof(*scan));
   1010	scan->tx_cmd.len = cpu_to_le16(cmd_len);
   1011
   1012	scan->filter_flags |=
   1013	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
   1014
   1015	scan->channel_count =
   1016	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
   1017					 (void *)&scan->data[cmd_len]);
   1018	if (scan->channel_count == 0) {
   1019		D_SCAN("channel count %d\n", scan->channel_count);
   1020		return -EIO;
   1021	}
   1022
   1023	cmd.len +=
   1024	    le16_to_cpu(scan->tx_cmd.len) +
   1025	    scan->channel_count * sizeof(struct il_scan_channel);
   1026	cmd.data = scan;
   1027	scan->len = cpu_to_le16(cmd.len);
   1028
   1029	set_bit(S_SCAN_HW, &il->status);
   1030
   1031	ret = il_send_cmd_sync(il, &cmd);
   1032	if (ret)
   1033		clear_bit(S_SCAN_HW, &il->status);
   1034
   1035	return ret;
   1036}
   1037
   1038int
   1039il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
   1040			   bool add)
   1041{
   1042	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
   1043
   1044	if (add)
   1045		return il4965_add_bssid_station(il, vif->bss_conf.bssid,
   1046						&vif_priv->ibss_bssid_sta_id);
   1047	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
   1048				 vif->bss_conf.bssid);
   1049}
   1050
   1051void
   1052il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
   1053{
   1054	lockdep_assert_held(&il->sta_lock);
   1055
   1056	if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
   1057		il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
   1058	else {
   1059		D_TX("free more than tfds_in_queue (%u:%d)\n",
   1060		     il->stations[sta_id].tid[tid].tfds_in_queue, freed);
   1061		il->stations[sta_id].tid[tid].tfds_in_queue = 0;
   1062	}
   1063}
   1064
   1065#define IL_TX_QUEUE_MSK	0xfffff
   1066
   1067static bool
   1068il4965_is_single_rx_stream(struct il_priv *il)
   1069{
   1070	return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
   1071	    il->current_ht_config.single_chain_sufficient;
   1072}
   1073
   1074#define IL_NUM_RX_CHAINS_MULTIPLE	3
   1075#define IL_NUM_RX_CHAINS_SINGLE	2
   1076#define IL_NUM_IDLE_CHAINS_DUAL	2
   1077#define IL_NUM_IDLE_CHAINS_SINGLE	1
   1078
   1079/*
   1080 * Determine how many receiver/antenna chains to use.
   1081 *
   1082 * More provides better reception via diversity.  Fewer saves power
   1083 * at the expense of throughput, but only when not in powersave to
   1084 * start with.
   1085 *
   1086 * MIMO (dual stream) requires at least 2, but works better with 3.
   1087 * This does not determine *which* chains to use, just how many.
   1088 */
   1089static int
   1090il4965_get_active_rx_chain_count(struct il_priv *il)
   1091{
   1092	/* # of Rx chains to use when expecting MIMO. */
   1093	if (il4965_is_single_rx_stream(il))
   1094		return IL_NUM_RX_CHAINS_SINGLE;
   1095	else
   1096		return IL_NUM_RX_CHAINS_MULTIPLE;
   1097}
   1098
   1099/*
   1100 * When we are in power saving mode, unless device support spatial
   1101 * multiplexing power save, use the active count for rx chain count.
   1102 */
   1103static int
   1104il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
   1105{
   1106	/* # Rx chains when idling, depending on SMPS mode */
   1107	switch (il->current_ht_config.smps) {
   1108	case IEEE80211_SMPS_STATIC:
   1109	case IEEE80211_SMPS_DYNAMIC:
   1110		return IL_NUM_IDLE_CHAINS_SINGLE;
   1111	case IEEE80211_SMPS_OFF:
   1112		return active_cnt;
   1113	default:
   1114		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
   1115		return active_cnt;
   1116	}
   1117}
   1118
   1119/* up to 4 chains */
   1120static u8
   1121il4965_count_chain_bitmap(u32 chain_bitmap)
   1122{
   1123	u8 res;
   1124	res = (chain_bitmap & BIT(0)) >> 0;
   1125	res += (chain_bitmap & BIT(1)) >> 1;
   1126	res += (chain_bitmap & BIT(2)) >> 2;
   1127	res += (chain_bitmap & BIT(3)) >> 3;
   1128	return res;
   1129}
   1130
   1131/*
   1132 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
   1133 *
   1134 * Selects how many and which Rx receivers/antennas/chains to use.
   1135 * This should not be used for scan command ... it puts data in wrong place.
   1136 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	/* CAM = "continuously aware mode", i.e. not in power-save */
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, il4965_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO reception only when multiple streams are usable,
	 * enough chains are active, and we are not power-saving. */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	/* Sanity: both counts non-zero, and idle never exceeds active. */
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
   1187
static const char *
il4965_get_fh_string(int cmd)
{
	/* Map an FH (Flow Handler) register address to its symbolic name
	 * for debug dumps.  IL_CMD presumably expands each entry to
	 * "case FOO: return "FOO";" -- see its definition in common.h. */
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
   1205
/*
 * Dump the Flow Handler register values for debugging.
 * With CONFIG_IWLEGACY_DEBUG and @display set, the dump is formatted
 * into a newly allocated buffer returned via *buf (ownership passes to
 * the caller, who must free it) and the byte count is returned.
 * Otherwise the registers are logged via IL_ERR and 0 is returned.
 * Returns -ENOMEM if the buffer allocation fails.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* FH registers worth inspecting when Rx/Tx DMA misbehaves */
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* sized at roughly 48 bytes per register line + header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      "  %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
   1250
/*
 * Handle a missed-beacon notification from uCode.  If we missed more
 * consecutive beacons than the configured threshold, restart the Rx
 * sensitivity calibration -- unless a scan is running, since being
 * off-channel legitimately causes missed beacons.
 */
static void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		/* "becons" is a typo baked into the firmware API struct */
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
   1269
   1270/* Calculate noise level, based on measurements during network silence just
   1271 *   before arriving beacon.  This measurement can be done only if we know
   1272 *   exactly when to expect beacons, therefore only when we're associated. */
static void
il4965_rx_calc_noise(struct il_priv *il)
{
	struct stats_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &(il->_4965.stats.rx.general);
	/* Per-chain silence RSSI sampled before the beacon; mask off
	 * the out-of-band bits to keep only the in-band measurement. */
	bcn_silence_a =
	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* A zero reading means the chain was not measuring; only chains
	 * with a non-zero value contribute to the average. */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas; the -107 offset presumably
	 * converts the raw silence measurement to dBm -- TODO confirm
	 * against the firmware interface documentation. */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;

	/* Note: the computed noise level is only logged here; this
	 * function does not store it anywhere. */
	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
		bcn_silence_b, bcn_silence_c, last_rx_noise);
}
   1312
   1313#ifdef CONFIG_IWLEGACY_DEBUGFS
   1314/*
   1315 *  based on the assumption of all stats counter are in DWORD
   1316 *  FIXME: This function is for debugging, do not deal with
   1317 *  the case of counters roll-over.
   1318 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;

	/* Walk the notification body as a flat array of __le32 counters,
	 * in lock-step with the previous snapshot and the delta /
	 * max-delta / accumulator arrays (all share the same layout). */
	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* Start at sizeof(__le32) to skip the leading flag word.
	 * Counters that did not increase (e.g. rolled over) are ignored,
	 * per the FIXME above. */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
   1353#endif
   1354
/*
 * Handle a statistics notification: snapshot the uCode stats into
 * il->_4965.stats, kick noise/runtime calibration when appropriate,
 * and re-run temperature calibration if the reading changed.
 */
static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Temperature recalibration is needed if the reported temperature
	 * changed or the HT40 mode flag toggled since the last snapshot. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/*
	 * Reschedule the stats timer to occur in recalib_seconds to ensure
	 * we get a thermal update even if the uCode doesn't give us one
	 */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));

	/* Noise calculation depends on beacon timing, so skip it while
	 * scanning; only the periodic N_STATS notification (not other
	 * callers of this handler) triggers the calibration work. */
	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}

	if (change)
		il4965_temperature_calib(il);
}
   1396
static void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	/* uCode indicates its internal counters were reset; clear our
	 * accumulated debugfs stats so deltas stay consistent, then
	 * process the body as a regular statistics notification. */
	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_4965.accum_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.delta_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il4965_hdl_stats(il, rxb);
}
   1414
   1415
   1416/*
   1417 * mac80211 queues, ACs, hardware queues, FIFOs.
   1418 *
   1419 * Cf. https://wireless.wiki.kernel.org/en/developers/Documentation/mac80211/queues
   1420 *
   1421 * Mac80211 uses the following numbers, which we get as from it
   1422 * by way of skb_get_queue_mapping(skb):
   1423 *
   1424 *     VO      0
   1425 *     VI      1
   1426 *     BE      2
   1427 *     BK      3
   1428 *
   1429 *
   1430 * Regular (not A-MPDU) frames are put into hardware queues corresponding
   1431 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
   1432 * own queue per aggregation session (RA/TID combination), such queues are
   1433 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
   1434 * order to map frames to the right queue, we also need an AC->hw queue
   1435 * mapping. This is implemented here.
   1436 *
   1437 * Due to the way hw queues are set up (by the hw specific modules like
   1438 * 4965.c), the AC->hw queue mapping is the identity
   1439 * mapping.
   1440 */
   1441
/* 802.11 TID (0-7) to mac80211 access category mapping: TIDs 0/3 map
 * to best effort, 1/2 to background, 4/5 to video, 6/7 to voice. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
   1452
   1453static inline int
   1454il4965_get_ac_from_tid(u16 tid)
   1455{
   1456	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
   1457		return tid_to_ac[tid];
   1458
   1459	/* no support for TIDs 8-15 yet */
   1460	return -EINVAL;
   1461}
   1462
   1463static inline int
   1464il4965_get_fifo_from_tid(u16 tid)
   1465{
   1466	static const u8 ac_to_fifo[] = {
   1467		IL_TX_FIFO_VO,
   1468		IL_TX_FIFO_VI,
   1469		IL_TX_FIFO_BE,
   1470		IL_TX_FIFO_BK,
   1471	};
   1472
   1473	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
   1474		return ac_to_fifo[tid_to_ac[tid]];
   1475
   1476	/* no support for TIDs 8-15 yet */
   1477	return -EINVAL;
   1478}
   1479
   1480/*
   1481 * handle build C_TX command notification.
   1482 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* Only the first fragment of a probe response (fragment
		 * number 0 in seq_ctrl) carries the TSF flag. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* Block-ack requests need an immediate BA response. */
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	/* For QoS data the driver maintains per-TID sequence numbers
	 * itself (see il4965_tx_skb), so clear SEQ_CTL to keep uCode
	 * from assigning its own. */
	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	/* (Re)association requests get a slightly longer power-save
	 * frame timeout (3) than other management frames (2). */
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
   1536
   1537static void
   1538il4965_tx_cmd_build_rate(struct il_priv *il,
   1539			 struct il_tx_cmd *tx_cmd,
   1540			 struct ieee80211_tx_info *info,
   1541			 struct ieee80211_sta *sta,
   1542			 __le16 fc)
   1543{
   1544	const u8 rts_retry_limit = 60;
   1545	u32 rate_flags;
   1546	int rate_idx;
   1547	u8 data_retry_limit;
   1548	u8 rate_plcp;
   1549
   1550	/* Set retry limit on DATA packets and Probe Responses */
   1551	if (ieee80211_is_probe_resp(fc))
   1552		data_retry_limit = 3;
   1553	else
   1554		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
   1555	tx_cmd->data_retry_limit = data_retry_limit;
   1556	/* Set retry limit on RTS packets */
   1557	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
   1558
   1559	/* DATA packets will use the uCode station table for rate/antenna
   1560	 * selection */
   1561	if (ieee80211_is_data(fc)) {
   1562		tx_cmd->initial_rate_idx = 0;
   1563		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
   1564		return;
   1565	}
   1566
   1567	/**
   1568	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
   1569	 * not really a TX rate.  Thus, we use the lowest supported rate for
   1570	 * this band.  Also use the lowest supported rate if the stored rate
   1571	 * idx is invalid.
   1572	 */
   1573	rate_idx = info->control.rates[0].idx;
   1574	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
   1575	    || rate_idx > RATE_COUNT_LEGACY)
   1576		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
   1577	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
   1578	if (info->band == NL80211_BAND_5GHZ)
   1579		rate_idx += IL_FIRST_OFDM_RATE;
   1580	/* Get PLCP rate for tx_cmd->rate_n_flags */
   1581	rate_plcp = il_rates[rate_idx].plcp;
   1582	/* Zero out flags for this packet */
   1583	rate_flags = 0;
   1584
   1585	/* Set CCK flag as needed */
   1586	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
   1587		rate_flags |= RATE_MCS_CCK_MSK;
   1588
   1589	/* Set up antennas */
   1590	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
   1591	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
   1592
   1593	/* Set the rate in the TX cmd */
   1594	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
   1595}
   1596
/*
 * Fill the security section of a Tx command so uCode can perform the
 * encryption in hardware, based on the mac80211-selected hw key.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* Derive the per-packet (phase 2) TKIP key for this frame */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		/* WEP-104 is WEP-40 plus the 128-bit key-size flag */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key material starts at offset 3 within tx_cmd->key */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
   1638
   1639/*
   1640 * start C_TX command process
   1641 */
   1642int
   1643il4965_tx_skb(struct il_priv *il,
   1644	      struct ieee80211_sta *sta,
   1645	      struct sk_buff *skb)
   1646{
   1647	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
   1648	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
   1649	struct il_station_priv *sta_priv = NULL;
   1650	struct il_tx_queue *txq;
   1651	struct il_queue *q;
   1652	struct il_device_cmd *out_cmd;
   1653	struct il_cmd_meta *out_meta;
   1654	struct il_tx_cmd *tx_cmd;
   1655	int txq_id;
   1656	dma_addr_t phys_addr;
   1657	dma_addr_t txcmd_phys;
   1658	dma_addr_t scratch_phys;
   1659	u16 len, firstlen, secondlen;
   1660	u16 seq_number = 0;
   1661	__le16 fc;
   1662	u8 hdr_len;
   1663	u8 sta_id;
   1664	u8 wait_write_ptr = 0;
   1665	u8 tid = 0;
   1666	u8 *qc = NULL;
   1667	unsigned long flags;
   1668	bool is_agg = false;
   1669
   1670	spin_lock_irqsave(&il->lock, flags);
   1671	if (il_is_rfkill(il)) {
   1672		D_DROP("Dropping - RF KILL\n");
   1673		goto drop_unlock;
   1674	}
   1675
   1676	fc = hdr->frame_control;
   1677
   1678#ifdef CONFIG_IWLEGACY_DEBUG
   1679	if (ieee80211_is_auth(fc))
   1680		D_TX("Sending AUTH frame\n");
   1681	else if (ieee80211_is_assoc_req(fc))
   1682		D_TX("Sending ASSOC frame\n");
   1683	else if (ieee80211_is_reassoc_req(fc))
   1684		D_TX("Sending REASSOC frame\n");
   1685#endif
   1686
   1687	hdr_len = ieee80211_hdrlen(fc);
   1688
   1689	/* For management frames use broadcast id to do not break aggregation */
   1690	if (!ieee80211_is_data(fc))
   1691		sta_id = il->hw_params.bcast_id;
   1692	else {
   1693		/* Find idx into station table for destination station */
   1694		sta_id = il_sta_id_or_broadcast(il, sta);
   1695
   1696		if (sta_id == IL_INVALID_STATION) {
   1697			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
   1698			goto drop_unlock;
   1699		}
   1700	}
   1701
   1702	D_TX("station Id %d\n", sta_id);
   1703
   1704	if (sta)
   1705		sta_priv = (void *)sta->drv_priv;
   1706
   1707	if (sta_priv && sta_priv->asleep &&
   1708	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
   1709		/*
   1710		 * This sends an asynchronous command to the device,
   1711		 * but we can rely on it being processed before the
   1712		 * next frame is processed -- and the next frame to
   1713		 * this station is the one that will consume this
   1714		 * counter.
   1715		 * For now set the counter to just 1 since we do not
   1716		 * support uAPSD yet.
   1717		 */
   1718		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
   1719	}
   1720
   1721	/* FIXME: remove me ? */
   1722	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
   1723
   1724	/* Access category (AC) is also the queue number */
   1725	txq_id = skb_get_queue_mapping(skb);
   1726
   1727	/* irqs already disabled/saved above when locking il->lock */
   1728	spin_lock(&il->sta_lock);
   1729
   1730	if (ieee80211_is_data_qos(fc)) {
   1731		qc = ieee80211_get_qos_ctl(hdr);
   1732		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
   1733		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
   1734			spin_unlock(&il->sta_lock);
   1735			goto drop_unlock;
   1736		}
   1737		seq_number = il->stations[sta_id].tid[tid].seq_number;
   1738		seq_number &= IEEE80211_SCTL_SEQ;
   1739		hdr->seq_ctrl =
   1740		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
   1741		hdr->seq_ctrl |= cpu_to_le16(seq_number);
   1742		seq_number += 0x10;
   1743		/* aggregation is on for this <sta,tid> */
   1744		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
   1745		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
   1746			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
   1747			is_agg = true;
   1748		}
   1749	}
   1750
   1751	txq = &il->txq[txq_id];
   1752	q = &txq->q;
   1753
   1754	if (unlikely(il_queue_space(q) < q->high_mark)) {
   1755		spin_unlock(&il->sta_lock);
   1756		goto drop_unlock;
   1757	}
   1758
   1759	if (ieee80211_is_data_qos(fc)) {
   1760		il->stations[sta_id].tid[tid].tfds_in_queue++;
   1761		if (!ieee80211_has_morefrags(fc))
   1762			il->stations[sta_id].tid[tid].seq_number = seq_number;
   1763	}
   1764
   1765	spin_unlock(&il->sta_lock);
   1766
   1767	txq->skbs[q->write_ptr] = skb;
   1768
   1769	/* Set up first empty entry in queue's array of Tx/cmd buffers */
   1770	out_cmd = txq->cmd[q->write_ptr];
   1771	out_meta = &txq->meta[q->write_ptr];
   1772	tx_cmd = &out_cmd->cmd.tx;
   1773	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
   1774	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
   1775
   1776	/*
   1777	 * Set up the Tx-command (not MAC!) header.
   1778	 * Store the chosen Tx queue and TFD idx within the sequence field;
   1779	 * after Tx, uCode's Tx response will return this value so driver can
   1780	 * locate the frame within the tx queue and do post-tx processing.
   1781	 */
   1782	out_cmd->hdr.cmd = C_TX;
   1783	out_cmd->hdr.sequence =
   1784	    cpu_to_le16((u16)
   1785			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
   1786
   1787	/* Copy MAC header from skb into command buffer */
   1788	memcpy(tx_cmd->hdr, hdr, hdr_len);
   1789
   1790	/* Total # bytes to be transmitted */
   1791	tx_cmd->len = cpu_to_le16((u16) skb->len);
   1792
   1793	if (info->control.hw_key)
   1794		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
   1795
   1796	/* TODO need this for burst mode later on */
   1797	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
   1798
   1799	il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
   1800
   1801	/*
   1802	 * Use the first empty entry in this queue's command buffer array
   1803	 * to contain the Tx command and MAC header concatenated together
   1804	 * (payload data will be in another buffer).
   1805	 * Size of this varies, due to varying MAC header length.
   1806	 * If end is not dword aligned, we'll have 2 extra bytes at the end
   1807	 * of the MAC header (device reads on dword boundaries).
   1808	 * We'll tell device about this padding later.
   1809	 */
   1810	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
   1811	firstlen = (len + 3) & ~3;
   1812
   1813	/* Tell NIC about any 2-byte padding after MAC header */
   1814	if (firstlen != len)
   1815		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
   1816
   1817	/* Physical address of this Tx command's header (not MAC header!),
   1818	 * within command buffer array. */
   1819	txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
   1820				    DMA_BIDIRECTIONAL);
   1821	if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
   1822		goto drop_unlock;
   1823
   1824	/* Set up TFD's 2nd entry to point directly to remainder of skb,
   1825	 * if any (802.11 null frames have no payload). */
   1826	secondlen = skb->len - hdr_len;
   1827	if (secondlen > 0) {
   1828		phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
   1829					   secondlen, DMA_TO_DEVICE);
   1830		if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
   1831			goto drop_unlock;
   1832	}
   1833
   1834	/* Add buffer containing Tx command and MAC(!) header to TFD's
   1835	 * first entry */
   1836	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
   1837	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
   1838	dma_unmap_len_set(out_meta, len, firstlen);
   1839	if (secondlen)
   1840		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
   1841					       0, 0);
   1842
   1843	if (!ieee80211_has_morefrags(hdr->frame_control)) {
   1844		txq->need_update = 1;
   1845	} else {
   1846		wait_write_ptr = 1;
   1847		txq->need_update = 0;
   1848	}
   1849
   1850	scratch_phys =
   1851	    txcmd_phys + sizeof(struct il_cmd_header) +
   1852	    offsetof(struct il_tx_cmd, scratch);
   1853
   1854	/* take back ownership of DMA buffer to enable update */
   1855	dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen,
   1856				DMA_BIDIRECTIONAL);
   1857	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
   1858	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
   1859
   1860	il_update_stats(il, true, fc, skb->len);
   1861
   1862	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
   1863	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
   1864	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
   1865	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
   1866
   1867	/* Set up entry for this TFD in Tx byte-count array */
   1868	if (info->flags & IEEE80211_TX_CTL_AMPDU)
   1869		il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
   1870
   1871	dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen,
   1872				   DMA_BIDIRECTIONAL);
   1873
   1874	/* Tell device the write idx *just past* this latest filled TFD */
   1875	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
   1876	il_txq_update_write_ptr(il, txq);
   1877	spin_unlock_irqrestore(&il->lock, flags);
   1878
   1879	/*
   1880	 * At this point the frame is "transmitted" successfully
   1881	 * and we will get a TX status notification eventually,
   1882	 * regardless of the value of ret. "ret" only indicates
   1883	 * whether or not we should update the write pointer.
   1884	 */
   1885
   1886	/*
   1887	 * Avoid atomic ops if it isn't an associated client.
   1888	 * Also, if this is a packet for aggregation, don't
   1889	 * increase the counter because the ucode will stop
   1890	 * aggregation queues when their respective station
   1891	 * goes to sleep.
   1892	 */
   1893	if (sta_priv && sta_priv->client && !is_agg)
   1894		atomic_inc(&sta_priv->pending_frames);
   1895
   1896	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
   1897		if (wait_write_ptr) {
   1898			spin_lock_irqsave(&il->lock, flags);
   1899			txq->need_update = 1;
   1900			il_txq_update_write_ptr(il, txq);
   1901			spin_unlock_irqrestore(&il->lock, flags);
   1902		} else {
   1903			il_stop_queue(il, txq);
   1904		}
   1905	}
   1906
   1907	return 0;
   1908
   1909drop_unlock:
   1910	spin_unlock_irqrestore(&il->lock, flags);
   1911	return -1;
   1912}
   1913
   1914static inline int
   1915il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
   1916{
   1917	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
   1918				       GFP_KERNEL);
   1919	if (!ptr->addr)
   1920		return -ENOMEM;
   1921	ptr->size = size;
   1922	return 0;
   1923}
   1924
   1925static inline void
   1926il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
   1927{
   1928	if (unlikely(!ptr->addr))
   1929		return;
   1930
   1931	dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
   1932	memset(ptr, 0, sizeof(*ptr));
   1933}
   1934
   1935/*
   1936 * il4965_hw_txq_ctx_free - Free TXQ Context
   1937 *
   1938 * Destroy all TX DMA queues and structures
   1939 */
   1940void
   1941il4965_hw_txq_ctx_free(struct il_priv *il)
   1942{
   1943	int txq_id;
   1944
   1945	/* Tx queues */
   1946	if (il->txq) {
   1947		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
   1948			if (txq_id == il->cmd_queue)
   1949				il_cmd_queue_free(il);
   1950			else
   1951				il_tx_queue_free(il, txq_id);
   1952	}
   1953	il4965_free_dma_ptr(il, &il->kw);
   1954
   1955	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
   1956
   1957	/* free tx queue structure */
   1958	il_free_txq_mem(il);
   1959}
   1960
   1961/*
   1962 * il4965_txq_ctx_alloc - allocate TX queue context
   1963 * Allocate all Tx DMA structures and initialize them
   1964 */
   1965int
   1966il4965_txq_ctx_alloc(struct il_priv *il)
   1967{
   1968	int ret, txq_id;
   1969	unsigned long flags;
   1970
   1971	/* Free all tx/cmd queues and keep-warm buffer */
   1972	il4965_hw_txq_ctx_free(il);
   1973
   1974	ret =
   1975	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
   1976				 il->hw_params.scd_bc_tbls_size);
   1977	if (ret) {
   1978		IL_ERR("Scheduler BC Table allocation failed\n");
   1979		goto error_bc_tbls;
   1980	}
   1981	/* Alloc keep-warm buffer */
   1982	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
   1983	if (ret) {
   1984		IL_ERR("Keep Warm allocation failed\n");
   1985		goto error_kw;
   1986	}
   1987
   1988	/* allocate tx queue structure */
   1989	ret = il_alloc_txq_mem(il);
   1990	if (ret)
   1991		goto error;
   1992
   1993	spin_lock_irqsave(&il->lock, flags);
   1994
   1995	/* Turn off all Tx DMA fifos */
   1996	il4965_txq_set_sched(il, 0);
   1997
   1998	/* Tell NIC where to find the "keep warm" buffer */
   1999	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
   2000
   2001	spin_unlock_irqrestore(&il->lock, flags);
   2002
   2003	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
   2004	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
   2005		ret = il_tx_queue_init(il, txq_id);
   2006		if (ret) {
   2007			IL_ERR("Tx %d queue init failed\n", txq_id);
   2008			goto error;
   2009		}
   2010	}
   2011
   2012	return ret;
   2013
   2014error:
   2015	il4965_hw_txq_ctx_free(il);
   2016	il4965_free_dma_ptr(il, &il->kw);
   2017error_kw:
   2018	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
   2019error_bc_tbls:
   2020	return ret;
   2021}
   2022
   2023void
   2024il4965_txq_ctx_reset(struct il_priv *il)
   2025{
   2026	int txq_id;
   2027	unsigned long flags;
   2028
   2029	spin_lock_irqsave(&il->lock, flags);
   2030
   2031	/* Turn off all Tx DMA fifos */
   2032	il4965_txq_set_sched(il, 0);
   2033	/* Tell NIC where to find the "keep warm" buffer */
   2034	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
   2035
   2036	spin_unlock_irqrestore(&il->lock, flags);
   2037
   2038	/* Alloc and init all Tx queues, including the command queue (#4) */
   2039	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
   2040		il_tx_queue_reset(il, txq_id);
   2041}
   2042
   2043static void
   2044il4965_txq_ctx_unmap(struct il_priv *il)
   2045{
   2046	int txq_id;
   2047
   2048	if (!il->txq)
   2049		return;
   2050
   2051	/* Unmap DMA from host system and free skb's */
   2052	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
   2053		if (txq_id == il->cmd_queue)
   2054			il_cmd_queue_unmap(il);
   2055		else
   2056			il_tx_queue_unmap(il, txq_id);
   2057}
   2058
   2059/*
   2060 * il4965_txq_ctx_stop - Stop all Tx DMA channels
   2061 */
   2062void
   2063il4965_txq_ctx_stop(struct il_priv *il)
   2064{
   2065	int ch, ret;
   2066
   2067	_il_wr_prph(il, IL49_SCD_TXFACT, 0);
   2068
   2069	/* Stop each Tx DMA channel, and wait for it to be idle */
   2070	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
   2071		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
   2072		ret =
   2073		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
   2074				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
   2075				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
   2076				 1000);
   2077		if (ret < 0)
   2078			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
   2079			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
   2080	}
   2081}
   2082
   2083/*
   2084 * Find first available (lowest unused) Tx Queue, mark it "active".
   2085 * Called only when finding queue for aggregation.
   2086 * Should never return anything < 7, because they should already
   2087 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
   2088 */
   2089static int
   2090il4965_txq_ctx_activate_free(struct il_priv *il)
   2091{
   2092	int txq_id;
   2093
   2094	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
   2095		if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
   2096			return txq_id;
   2097	return -1;
   2098}
   2099
   2100/*
   2101 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
   2102 */
   2103static void
   2104il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
   2105{
   2106	/* Simply stop the queue, but don't change any configuration;
   2107	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
   2108	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
   2109		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
   2110		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
   2111}
   2112
   2113/*
   2114 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
   2115 */
   2116static int
   2117il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
   2118{
   2119	u32 tbl_dw_addr;
   2120	u32 tbl_dw;
   2121	u16 scd_q2ratid;
   2122
   2123	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
   2124
   2125	tbl_dw_addr =
   2126	    il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
   2127
   2128	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
   2129
   2130	if (txq_id & 0x1)
   2131		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
   2132	else
   2133		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
   2134
   2135	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
   2136
   2137	return 0;
   2138}
   2139
   2140/*
   2141 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
   2142 *
   2143 * NOTE:  txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
   2144 *        i.e. it must be one of the higher queues used for aggregation
   2145 */
   2146static int
   2147il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
   2148		      int tid, u16 ssn_idx)
   2149{
   2150	unsigned long flags;
   2151	u16 ra_tid;
   2152	int ret;
   2153
   2154	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
   2155	    (IL49_FIRST_AMPDU_QUEUE +
   2156	     il->cfg->num_of_ampdu_queues <= txq_id)) {
   2157		IL_WARN("queue number out of range: %d, must be %d to %d\n",
   2158			txq_id, IL49_FIRST_AMPDU_QUEUE,
   2159			IL49_FIRST_AMPDU_QUEUE +
   2160			il->cfg->num_of_ampdu_queues - 1);
   2161		return -EINVAL;
   2162	}
   2163
   2164	ra_tid = BUILD_RAxTID(sta_id, tid);
   2165
   2166	/* Modify device's station table to Tx this TID */
   2167	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
   2168	if (ret)
   2169		return ret;
   2170
   2171	spin_lock_irqsave(&il->lock, flags);
   2172
   2173	/* Stop this Tx queue before configuring it */
   2174	il4965_tx_queue_stop_scheduler(il, txq_id);
   2175
   2176	/* Map receiver-address / traffic-ID to this queue */
   2177	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
   2178
   2179	/* Set this queue as a chain-building queue */
   2180	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
   2181
   2182	/* Place first TFD at idx corresponding to start sequence number.
   2183	 * Assumes that ssn_idx is valid (!= 0xFFF) */
   2184	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
   2185	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
   2186	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
   2187
   2188	/* Set up Tx win size and frame limit for this queue */
   2189	il_write_targ_mem(il,
   2190			  il->scd_base_addr +
   2191			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
   2192			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
   2193			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
   2194
   2195	il_write_targ_mem(il,
   2196			  il->scd_base_addr +
   2197			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
   2198			  (SCD_FRAME_LIMIT <<
   2199			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
   2200			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
   2201
   2202	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
   2203
   2204	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
   2205	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
   2206
   2207	spin_unlock_irqrestore(&il->lock, flags);
   2208
   2209	return 0;
   2210}
   2211
/*
 * il4965_tx_agg_start - begin an A-MPDU Tx session for @sta/@tid
 *
 * mac80211 ampdu_action(TX_START) handler.  Claims a free aggregation
 * queue, records it in the station's tid data and enables it in the
 * scheduler.  Returns IEEE80211_AMPDU_TX_START_IMMEDIATE when the HW
 * queue is already empty, 0 when the ADDBA flow must first wait for
 * the queue to drain (state IL_EMPTYING_HW_QUEUE_ADDBA, completed
 * later by il4965_txq_check_empty()), or a negative errno on failure.
 * @ssn is set to the starting sequence number for the BA session.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* FIXME: warning if tx fifo not found ? */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	/* Refuse to start a session that is already being set up / torn down */
	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Grab one of the queues reserved for aggregation */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	/* Re-take the lock: tid_data may have changed while unlocked */
	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
   2274
   2275/*
   2276 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
   2277 * il->lock must be held by the caller
   2278 */
   2279static int
   2280il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
   2281{
   2282	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
   2283	    (IL49_FIRST_AMPDU_QUEUE +
   2284	     il->cfg->num_of_ampdu_queues <= txq_id)) {
   2285		IL_WARN("queue number out of range: %d, must be %d to %d\n",
   2286			txq_id, IL49_FIRST_AMPDU_QUEUE,
   2287			IL49_FIRST_AMPDU_QUEUE +
   2288			il->cfg->num_of_ampdu_queues - 1);
   2289		return -EINVAL;
   2290	}
   2291
   2292	il4965_tx_queue_stop_scheduler(il, txq_id);
   2293
   2294	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
   2295
   2296	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
   2297	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
   2298	/* supposes that ssn_idx is valid (!= 0xFFF) */
   2299	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
   2300
   2301	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
   2302	il_txq_ctx_deactivate(il, txq_id);
   2303	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
   2304
   2305	return 0;
   2306}
   2307
/*
 * il4965_tx_agg_stop - tear down an A-MPDU Tx session for @sta/@tid
 *
 * mac80211 ampdu_action(TX_STOP) handler.  If the HW queue still holds
 * frames, the teardown is deferred (state IL_EMPTYING_HW_QUEUE_DELBA)
 * and finished later by il4965_txq_check_empty(); otherwise the
 * aggregation queue is disabled immediately and mac80211 is notified.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* FIXME: warning if tx_fifo_id not found ? */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		/* Warn but proceed with the teardown anyway */
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: hand off from sta_lock to il->lock
	 * with interrupts kept disabled; @flags saved at the irqsave
	 * above is restored when il->lock is dropped below. */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 *  mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
   2385
/*
 * il4965_txq_check_empty - continue a deferred ADDBA/DELBA flow
 *
 * Called after frames were reclaimed from @txq_id for station @sta_id /
 * @tid.  Once the HW queue has drained, finishes the aggregation-session
 * transition left pending by il4965_tx_agg_start()/il4965_tx_agg_stop()
 * and notifies mac80211.  Always returns 0.
 *
 * Caller must hold il->sta_lock.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
   2421
   2422static void
   2423il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
   2424{
   2425	struct ieee80211_sta *sta;
   2426	struct il_station_priv *sta_priv;
   2427
   2428	rcu_read_lock();
   2429	sta = ieee80211_find_sta(il->vif, addr1);
   2430	if (sta) {
   2431		sta_priv = (void *)sta->drv_priv;
   2432		/* avoid atomic ops if this isn't a client */
   2433		if (sta_priv->client &&
   2434		    atomic_dec_return(&sta_priv->pending_frames) == 0)
   2435			ieee80211_sta_block_awake(il->hw, sta, false);
   2436	}
   2437	rcu_read_unlock();
   2438}
   2439
   2440static void
   2441il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
   2442{
   2443	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
   2444
   2445	if (!is_agg)
   2446		il4965_non_agg_tx_status(il, hdr->addr1);
   2447
   2448	ieee80211_tx_status_irqsafe(il->hw, skb);
   2449}
   2450
/*
 * il4965_tx_queue_reclaim - free skbs up to (and including) @idx
 *
 * Walks the queue's read pointer forward to one past @idx, handing each
 * completed frame to mac80211 and releasing its TFD.  Returns the number
 * of reclaimed QoS-data frames (used by callers to adjust the per-tid
 * tfds_in_queue count), or 0 when @idx is out of range.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Advance read_ptr one slot at a time until it passes @idx */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		/* Only QoS-data frames count against tfds_in_queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
   2486
   2487/*
   2488 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
   2489 *
   2490 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
   2491 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
   2492 */
   2493static int
   2494il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
   2495				     struct il_compressed_ba_resp *ba_resp)
   2496{
   2497	int i, sh, ack;
   2498	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
   2499	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
   2500	int successes = 0;
   2501	struct ieee80211_tx_info *info;
   2502	u64 bitmap, sent_bitmap;
   2503
   2504	if (unlikely(!agg->wait_for_ba)) {
   2505		if (unlikely(ba_resp->bitmap))
   2506			IL_ERR("Received BA when not expected\n");
   2507		return -EINVAL;
   2508	}
   2509
   2510	/* Mark that the expected block-ack response arrived */
   2511	agg->wait_for_ba = 0;
   2512	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
   2513
   2514	/* Calculate shift to align block-ack bits with our Tx win bits */
   2515	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
   2516	if (sh < 0)		/* tbw something is wrong with indices */
   2517		sh += 0x100;
   2518
   2519	if (agg->frame_count > (64 - sh)) {
   2520		D_TX_REPLY("more frames than bitmap size");
   2521		return -1;
   2522	}
   2523
   2524	/* don't use 64-bit values for now */
   2525	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
   2526
   2527	/* check for success or failure according to the
   2528	 * transmitted bitmap and block-ack bitmap */
   2529	sent_bitmap = bitmap & agg->bitmap;
   2530
   2531	/* For each frame attempted in aggregation,
   2532	 * update driver's record of tx frame's status. */
   2533	i = 0;
   2534	while (sent_bitmap) {
   2535		ack = sent_bitmap & 1ULL;
   2536		successes += ack;
   2537		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
   2538			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
   2539		sent_bitmap >>= 1;
   2540		++i;
   2541	}
   2542
   2543	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
   2544
   2545	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
   2546	memset(&info->status, 0, sizeof(info->status));
   2547	info->flags |= IEEE80211_TX_STAT_ACK;
   2548	info->flags |= IEEE80211_TX_STAT_AMPDU;
   2549	info->status.ampdu_ack_len = successes;
   2550	info->status.ampdu_len = agg->frame_count;
   2551	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
   2552
   2553	return 0;
   2554}
   2555
   2556static inline bool
   2557il4965_is_tx_success(u32 status)
   2558{
   2559	status &= TX_STATUS_MSK;
   2560	return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
   2561}
   2562
   2563static u8
   2564il4965_find_station(struct il_priv *il, const u8 *addr)
   2565{
   2566	int i;
   2567	int start = 0;
   2568	int ret = IL_INVALID_STATION;
   2569	unsigned long flags;
   2570
   2571	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
   2572		start = IL_STA_ID;
   2573
   2574	if (is_broadcast_ether_addr(addr))
   2575		return il->hw_params.bcast_id;
   2576
   2577	spin_lock_irqsave(&il->sta_lock, flags);
   2578	for (i = start; i < il->hw_params.max_stations; i++)
   2579		if (il->stations[i].used &&
   2580		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
   2581			ret = i;
   2582			goto out;
   2583		}
   2584
   2585	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
   2586
   2587out:
   2588	/*
   2589	 * It may be possible that more commands interacting with stations
   2590	 * arrive before we completed processing the adding of
   2591	 * station
   2592	 */
   2593	if (ret != IL_INVALID_STATION &&
   2594	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
   2595	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS))) {
   2596		IL_ERR("Requested station info for sta %d before ready.\n",
   2597		       ret);
   2598		ret = IL_INVALID_STATION;
   2599	}
   2600	spin_unlock_irqrestore(&il->sta_lock, flags);
   2601	return ret;
   2602}
   2603
   2604static int
   2605il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
   2606{
   2607	if (il->iw_mode == NL80211_IFTYPE_STATION)
   2608		return IL_AP_ID;
   2609	else {
   2610		u8 *da = ieee80211_get_DA(hdr);
   2611
   2612		return il4965_find_station(il, da);
   2613	}
   2614}
   2615
/*
 * il4965_get_scd_ssn - extract the scheduler SSN from a Tx response
 *
 * Indexes frame_count entries past the start of the per-frame status
 * words in u.status; the word there appears to be a trailer holding the
 * scheduler's sequence number (NOTE(review): layout implied by the
 * frame_count offset -- confirm against the 4965 uCode API headers).
 * Masked to the 12-bit 802.11 sequence-number range.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
   2622
   2623static inline u32
   2624il4965_tx_status_to_mac80211(u32 status)
   2625{
   2626	status &= TX_STATUS_MSK;
   2627
   2628	switch (status) {
   2629	case TX_STATUS_SUCCESS:
   2630	case TX_STATUS_DIRECT_DONE:
   2631		return IEEE80211_TX_STAT_ACK;
   2632	case TX_STATUS_FAIL_DEST_PS:
   2633		return IEEE80211_TX_STAT_TX_FILTERED;
   2634	default:
   2635		return 0;
   2636	}
   2637}
   2638
   2639/*
   2640 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
   2641 */
   2642static int
   2643il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
   2644			  struct il4965_tx_resp *tx_resp, int txq_id,
   2645			  u16 start_idx)
   2646{
   2647	u16 status;
   2648	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
   2649	struct ieee80211_tx_info *info = NULL;
   2650	struct ieee80211_hdr *hdr = NULL;
   2651	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
   2652	int i, sh, idx;
   2653	u16 seq;
   2654	if (agg->wait_for_ba)
   2655		D_TX_REPLY("got tx response w/o block-ack\n");
   2656
   2657	agg->frame_count = tx_resp->frame_count;
   2658	agg->start_idx = start_idx;
   2659	agg->rate_n_flags = rate_n_flags;
   2660	agg->bitmap = 0;
   2661
   2662	/* num frames attempted by Tx command */
   2663	if (agg->frame_count == 1) {
   2664		/* Only one frame was attempted; no block-ack will arrive */
   2665		status = le16_to_cpu(frame_status[0].status);
   2666		idx = start_idx;
   2667
   2668		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
   2669			   agg->frame_count, agg->start_idx, idx);
   2670
   2671		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
   2672		info->status.rates[0].count = tx_resp->failure_frame + 1;
   2673		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
   2674		info->flags |= il4965_tx_status_to_mac80211(status);
   2675		il4965_hwrate_to_tx_control(il, rate_n_flags, info);
   2676
   2677		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
   2678			   tx_resp->failure_frame);
   2679		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
   2680
   2681		agg->wait_for_ba = 0;
   2682	} else {
   2683		/* Two or more frames were attempted; expect block-ack */
   2684		u64 bitmap = 0;
   2685		int start = agg->start_idx;
   2686		struct sk_buff *skb;
   2687
   2688		/* Construct bit-map of pending frames within Tx win */
   2689		for (i = 0; i < agg->frame_count; i++) {
   2690			u16 sc;
   2691			status = le16_to_cpu(frame_status[i].status);
   2692			seq = le16_to_cpu(frame_status[i].sequence);
   2693			idx = SEQ_TO_IDX(seq);
   2694			txq_id = SEQ_TO_QUEUE(seq);
   2695
   2696			if (status &
   2697			    (AGG_TX_STATE_FEW_BYTES_MSK |
   2698			     AGG_TX_STATE_ABORT_MSK))
   2699				continue;
   2700
   2701			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
   2702				   agg->frame_count, txq_id, idx);
   2703
   2704			skb = il->txq[txq_id].skbs[idx];
   2705			if (WARN_ON_ONCE(skb == NULL))
   2706				return -1;
   2707			hdr = (struct ieee80211_hdr *) skb->data;
   2708
   2709			sc = le16_to_cpu(hdr->seq_ctrl);
   2710			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
   2711				IL_ERR("BUG_ON idx doesn't match seq control"
   2712				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
   2713				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
   2714				return -1;
   2715			}
   2716
   2717			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
   2718				   IEEE80211_SEQ_TO_SN(sc));
   2719
   2720			sh = idx - start;
   2721			if (sh > 64) {
   2722				sh = (start - idx) + 0xff;
   2723				bitmap = bitmap << sh;
   2724				sh = 0;
   2725				start = idx;
   2726			} else if (sh < -64)
   2727				sh = 0xff - (start - idx);
   2728			else if (sh < 0) {
   2729				sh = start - idx;
   2730				start = idx;
   2731				bitmap = bitmap << sh;
   2732				sh = 0;
   2733			}
   2734			bitmap |= 1ULL << sh;
   2735			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
   2736				   (unsigned long long)bitmap);
   2737		}
   2738
   2739		agg->bitmap = bitmap;
   2740		agg->start_idx = start;
   2741		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
   2742			   agg->frame_count, agg->start_idx,
   2743			   (unsigned long long)agg->bitmap);
   2744
   2745		if (bitmap)
   2746			agg->wait_for_ba = 1;
   2747	}
   2748	return 0;
   2749}
   2750
   2751/*
   2752 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
   2753 */
   2754static void
   2755il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
   2756{
   2757	struct il_rx_pkt *pkt = rxb_addr(rxb);
   2758	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
   2759	int txq_id = SEQ_TO_QUEUE(sequence);
   2760	int idx = SEQ_TO_IDX(sequence);
   2761	struct il_tx_queue *txq = &il->txq[txq_id];
   2762	struct sk_buff *skb;
   2763	struct ieee80211_hdr *hdr;
   2764	struct ieee80211_tx_info *info;
   2765	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
   2766	u32 status = le32_to_cpu(tx_resp->u.status);
   2767	int tid;
   2768	int sta_id;
   2769	int freed;
   2770	u8 *qc = NULL;
   2771	unsigned long flags;
   2772
   2773	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
   2774		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
   2775		       "is out of range [0-%d] %d %d\n", txq_id, idx,
   2776		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
   2777		return;
   2778	}
   2779
   2780	txq->time_stamp = jiffies;
   2781
   2782	skb = txq->skbs[txq->q.read_ptr];
   2783	info = IEEE80211_SKB_CB(skb);
   2784	memset(&info->status, 0, sizeof(info->status));
   2785
   2786	hdr = (struct ieee80211_hdr *) skb->data;
   2787	if (ieee80211_is_data_qos(hdr->frame_control)) {
   2788		qc = ieee80211_get_qos_ctl(hdr);
   2789		tid = qc[0] & 0xf;
   2790	}
   2791
   2792	sta_id = il4965_get_ra_sta_id(il, hdr);
   2793	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
   2794		IL_ERR("Station not known\n");
   2795		return;
   2796	}
   2797
   2798	/*
   2799	 * Firmware will not transmit frame on passive channel, if it not yet
   2800	 * received some valid frame on that channel. When this error happen
   2801	 * we have to wait until firmware will unblock itself i.e. when we
   2802	 * note received beacon or other frame. We unblock queues in
   2803	 * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
   2804	 */
   2805	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
   2806	    il->iw_mode == NL80211_IFTYPE_STATION) {
   2807		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
   2808		D_INFO("Stopped queues - RX waiting on passive channel\n");
   2809	}
   2810
   2811	spin_lock_irqsave(&il->sta_lock, flags);
   2812	if (txq->sched_retry) {
   2813		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
   2814		struct il_ht_agg *agg;
   2815
   2816		if (WARN_ON(!qc))
   2817			goto out;
   2818
   2819		agg = &il->stations[sta_id].tid[tid].agg;
   2820
   2821		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
   2822
   2823		/* check if BAR is needed */
   2824		if (tx_resp->frame_count == 1 &&
   2825		    !il4965_is_tx_success(status))
   2826			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
   2827
   2828		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
   2829			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
   2830			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
   2831				   "%d idx %d\n", scd_ssn, idx);
   2832			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
   2833			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
   2834
   2835			if (il->mac80211_registered &&
   2836			    il_queue_space(&txq->q) > txq->q.low_mark &&
   2837			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
   2838				il_wake_queue(il, txq);
   2839		}
   2840	} else {
   2841		info->status.rates[0].count = tx_resp->failure_frame + 1;
   2842		info->flags |= il4965_tx_status_to_mac80211(status);
   2843		il4965_hwrate_to_tx_control(il,
   2844					    le32_to_cpu(tx_resp->rate_n_flags),
   2845					    info);
   2846
   2847		D_TX_REPLY("TXQ %d status %s (0x%08x) "
   2848			   "rate_n_flags 0x%x retries %d\n", txq_id,
   2849			   il4965_get_tx_fail_reason(status), status,
   2850			   le32_to_cpu(tx_resp->rate_n_flags),
   2851			   tx_resp->failure_frame);
   2852
   2853		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
   2854		if (qc && likely(sta_id != IL_INVALID_STATION))
   2855			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
   2856		else if (sta_id == IL_INVALID_STATION)
   2857			D_TX_REPLY("Station not known\n");
   2858
   2859		if (il->mac80211_registered &&
   2860		    il_queue_space(&txq->q) > txq->q.low_mark)
   2861			il_wake_queue(il, txq);
   2862	}
   2863out:
   2864	if (qc && likely(sta_id != IL_INVALID_STATION))
   2865		il4965_txq_check_empty(il, sta_id, tid, txq_id);
   2866
   2867	il4965_check_abort_status(il, tx_resp->frame_count, status);
   2868
   2869	spin_unlock_irqrestore(&il->sta_lock, flags);
   2870}
   2871
   2872/*
   2873 * translate ucode response to mac80211 tx status control values
   2874 */
   2875void
   2876il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
   2877			    struct ieee80211_tx_info *info)
   2878{
   2879	struct ieee80211_tx_rate *r = &info->status.rates[0];
   2880
   2881	info->status.antenna =
   2882	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
   2883	if (rate_n_flags & RATE_MCS_HT_MSK)
   2884		r->flags |= IEEE80211_TX_RC_MCS;
   2885	if (rate_n_flags & RATE_MCS_GF_MSK)
   2886		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
   2887	if (rate_n_flags & RATE_MCS_HT40_MSK)
   2888		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
   2889	if (rate_n_flags & RATE_MCS_DUP_MSK)
   2890		r->flags |= IEEE80211_TX_RC_DUP_DATA;
   2891	if (rate_n_flags & RATE_MCS_SGI_MSK)
   2892		r->flags |= IEEE80211_TX_RC_SHORT_GI;
   2893	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
   2894}
   2895
   2896/*
   2897 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
   2898 *
   2899 * Handles block-acknowledge notification from device, which reports success
   2900 * of frames sent via aggregation.
   2901 */
   2902static void
   2903il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
   2904{
   2905	struct il_rx_pkt *pkt = rxb_addr(rxb);
   2906	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
   2907	struct il_tx_queue *txq = NULL;
   2908	struct il_ht_agg *agg;
   2909	int idx;
   2910	int sta_id;
   2911	int tid;
   2912	unsigned long flags;
   2913
   2914	/* "flow" corresponds to Tx queue */
   2915	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
   2916
   2917	/* "ssn" is start of block-ack Tx win, corresponds to idx
   2918	 * (in Tx queue's circular buffer) of first TFD/frame in win */
   2919	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
   2920
   2921	if (scd_flow >= il->hw_params.max_txq_num) {
   2922		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
   2923		return;
   2924	}
   2925
   2926	txq = &il->txq[scd_flow];
   2927	sta_id = ba_resp->sta_id;
   2928	tid = ba_resp->tid;
   2929	agg = &il->stations[sta_id].tid[tid].agg;
   2930	if (unlikely(agg->txq_id != scd_flow)) {
   2931		/*
   2932		 * FIXME: this is a uCode bug which need to be addressed,
   2933		 * log the information and return for now!
   2934		 * since it is possible happen very often and in order
   2935		 * not to fill the syslog, don't enable the logging by default
   2936		 */
   2937		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
   2938			   scd_flow, agg->txq_id);
   2939		return;
   2940	}
   2941
   2942	/* Find idx just before block-ack win */
   2943	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
   2944
   2945	spin_lock_irqsave(&il->sta_lock, flags);
   2946
   2947	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
   2948		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
   2949		   ba_resp->sta_id);
   2950	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
   2951		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
   2952		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
   2953		   ba_resp->scd_flow, ba_resp->scd_ssn);
   2954	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
   2955		   (unsigned long long)agg->bitmap);
   2956
   2957	/* Update driver's record of ACK vs. not for each frame in win */
   2958	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
   2959
   2960	/* Release all TFDs before the SSN, i.e. all TFDs in front of
   2961	 * block-ack win (we assume that they've been successfully
   2962	 * transmitted ... if not, it's too late anyway). */
   2963	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
   2964		/* calculate mac80211 ampdu sw queue to wake */
   2965		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
   2966		il4965_free_tfds_in_queue(il, sta_id, tid, freed);
   2967
   2968		if (il_queue_space(&txq->q) > txq->q.low_mark &&
   2969		    il->mac80211_registered &&
   2970		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
   2971			il_wake_queue(il, txq);
   2972
   2973		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
   2974	}
   2975
   2976	spin_unlock_irqrestore(&il->sta_lock, flags);
   2977}
   2978
   2979#ifdef CONFIG_IWLEGACY_DEBUG
   2980const char *
   2981il4965_get_tx_fail_reason(u32 status)
   2982{
   2983#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
   2984#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
   2985
   2986	switch (status & TX_STATUS_MSK) {
   2987	case TX_STATUS_SUCCESS:
   2988		return "SUCCESS";
   2989		TX_STATUS_POSTPONE(DELAY);
   2990		TX_STATUS_POSTPONE(FEW_BYTES);
   2991		TX_STATUS_POSTPONE(QUIET_PERIOD);
   2992		TX_STATUS_POSTPONE(CALC_TTAK);
   2993		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
   2994		TX_STATUS_FAIL(SHORT_LIMIT);
   2995		TX_STATUS_FAIL(LONG_LIMIT);
   2996		TX_STATUS_FAIL(FIFO_UNDERRUN);
   2997		TX_STATUS_FAIL(DRAIN_FLOW);
   2998		TX_STATUS_FAIL(RFKILL_FLUSH);
   2999		TX_STATUS_FAIL(LIFE_EXPIRE);
   3000		TX_STATUS_FAIL(DEST_PS);
   3001		TX_STATUS_FAIL(HOST_ABORTED);
   3002		TX_STATUS_FAIL(BT_RETRY);
   3003		TX_STATUS_FAIL(STA_INVALID);
   3004		TX_STATUS_FAIL(FRAG_DROPPED);
   3005		TX_STATUS_FAIL(TID_DISABLE);
   3006		TX_STATUS_FAIL(FIFO_FLUSHED);
   3007		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
   3008		TX_STATUS_FAIL(PASSIVE_NO_RX);
   3009		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
   3010	}
   3011
   3012	return "UNKNOWN";
   3013
   3014#undef TX_STATUS_FAIL
   3015#undef TX_STATUS_POSTPONE
   3016}
   3017#endif /* CONFIG_IWLEGACY_DEBUG */
   3018
   3019static struct il_link_quality_cmd *
   3020il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
   3021{
   3022	int i, r;
   3023	struct il_link_quality_cmd *link_cmd;
   3024	u32 rate_flags = 0;
   3025	__le32 rate_n_flags;
   3026
   3027	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
   3028	if (!link_cmd) {
   3029		IL_ERR("Unable to allocate memory for LQ cmd.\n");
   3030		return NULL;
   3031	}
   3032	/* Set up the rate scaling to start at selected rate, fall back
   3033	 * all the way down to 1M in IEEE order, and then spin on 1M */
   3034	if (il->band == NL80211_BAND_5GHZ)
   3035		r = RATE_6M_IDX;
   3036	else
   3037		r = RATE_1M_IDX;
   3038
   3039	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
   3040		rate_flags |= RATE_MCS_CCK_MSK;
   3041
   3042	rate_flags |=
   3043	    il4965_first_antenna(il->hw_params.
   3044				 valid_tx_ant) << RATE_MCS_ANT_POS;
   3045	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
   3046	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
   3047		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
   3048
   3049	link_cmd->general_params.single_stream_ant_msk =
   3050	    il4965_first_antenna(il->hw_params.valid_tx_ant);
   3051
   3052	link_cmd->general_params.dual_stream_ant_msk =
   3053	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
   3054							       valid_tx_ant);
   3055	if (!link_cmd->general_params.dual_stream_ant_msk) {
   3056		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
   3057	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
   3058		link_cmd->general_params.dual_stream_ant_msk =
   3059		    il->hw_params.valid_tx_ant;
   3060	}
   3061
   3062	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
   3063	link_cmd->agg_params.agg_time_limit =
   3064	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
   3065
   3066	link_cmd->sta_id = sta_id;
   3067
   3068	return link_cmd;
   3069}
   3070
   3071/*
   3072 * il4965_add_bssid_station - Add the special IBSS BSSID station
   3073 *
   3074 * Function sleeps.
   3075 */
   3076int
   3077il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
   3078{
   3079	int ret;
   3080	u8 sta_id;
   3081	struct il_link_quality_cmd *link_cmd;
   3082	unsigned long flags;
   3083
   3084	if (sta_id_r)
   3085		*sta_id_r = IL_INVALID_STATION;
   3086
   3087	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
   3088	if (ret) {
   3089		IL_ERR("Unable to add station %pM\n", addr);
   3090		return ret;
   3091	}
   3092
   3093	if (sta_id_r)
   3094		*sta_id_r = sta_id;
   3095
   3096	spin_lock_irqsave(&il->sta_lock, flags);
   3097	il->stations[sta_id].used |= IL_STA_LOCAL;
   3098	spin_unlock_irqrestore(&il->sta_lock, flags);
   3099
   3100	/* Set up default rate scaling table in device's station table */
   3101	link_cmd = il4965_sta_alloc_lq(il, sta_id);
   3102	if (!link_cmd) {
   3103		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
   3104		       addr);
   3105		return -ENOMEM;
   3106	}
   3107
   3108	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
   3109	if (ret)
   3110		IL_ERR("Link quality command failed (%d)\n", ret);
   3111
   3112	spin_lock_irqsave(&il->sta_lock, flags);
   3113	il->stations[sta_id].lq = link_cmd;
   3114	spin_unlock_irqrestore(&il->sta_lock, flags);
   3115
   3116	return 0;
   3117}
   3118
   3119static int
   3120il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
   3121{
   3122	int i;
   3123	u8 buff[sizeof(struct il_wep_cmd) +
   3124		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
   3125	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
   3126	size_t cmd_size = sizeof(struct il_wep_cmd);
   3127	struct il_host_cmd cmd = {
   3128		.id = C_WEPKEY,
   3129		.data = wep_cmd,
   3130		.flags = CMD_SYNC,
   3131	};
   3132	bool not_empty = false;
   3133
   3134	might_sleep();
   3135
   3136	memset(wep_cmd, 0,
   3137	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
   3138
   3139	for (i = 0; i < WEP_KEYS_MAX; i++) {
   3140		u8 key_size = il->_4965.wep_keys[i].key_size;
   3141
   3142		wep_cmd->key[i].key_idx = i;
   3143		if (key_size) {
   3144			wep_cmd->key[i].key_offset = i;
   3145			not_empty = true;
   3146		} else
   3147			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
   3148
   3149		wep_cmd->key[i].key_size = key_size;
   3150		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
   3151	}
   3152
   3153	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
   3154	wep_cmd->num_keys = WEP_KEYS_MAX;
   3155
   3156	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
   3157	cmd.len = cmd_size;
   3158
   3159	if (not_empty || send_if_empty)
   3160		return il_send_cmd(il, &cmd);
   3161	else
   3162		return 0;
   3163}
   3164
/*
 * il4965_restore_default_wep_keys - resend the cached static WEP keys to
 * the uCode (e.g. after it has been reprogrammed).  No command is sent
 * when all key slots are empty.  Caller must hold il->mutex.
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
   3172
   3173int
   3174il4965_remove_default_wep_key(struct il_priv *il,
   3175			      struct ieee80211_key_conf *keyconf)
   3176{
   3177	int ret;
   3178	int idx = keyconf->keyidx;
   3179
   3180	lockdep_assert_held(&il->mutex);
   3181
   3182	D_WEP("Removing default WEP key: idx=%d\n", idx);
   3183
   3184	memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
   3185	if (il_is_rfkill(il)) {
   3186		D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
   3187		/* but keys in device are clear anyway so return success */
   3188		return 0;
   3189	}
   3190	ret = il4965_static_wepkey_cmd(il, 1);
   3191	D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
   3192
   3193	return ret;
   3194}
   3195
   3196int
   3197il4965_set_default_wep_key(struct il_priv *il,
   3198			   struct ieee80211_key_conf *keyconf)
   3199{
   3200	int ret;
   3201	int len = keyconf->keylen;
   3202	int idx = keyconf->keyidx;
   3203
   3204	lockdep_assert_held(&il->mutex);
   3205
   3206	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
   3207		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
   3208		return -EINVAL;
   3209	}
   3210
   3211	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
   3212	keyconf->hw_key_idx = HW_KEY_DEFAULT;
   3213	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
   3214
   3215	il->_4965.wep_keys[idx].key_size = len;
   3216	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
   3217
   3218	ret = il4965_static_wepkey_cmd(il, false);
   3219
   3220	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
   3221	return ret;
   3222}
   3223
   3224static int
   3225il4965_set_wep_dynamic_key_info(struct il_priv *il,
   3226				struct ieee80211_key_conf *keyconf, u8 sta_id)
   3227{
   3228	unsigned long flags;
   3229	__le16 key_flags = 0;
   3230	struct il_addsta_cmd sta_cmd;
   3231
   3232	lockdep_assert_held(&il->mutex);
   3233
   3234	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
   3235
   3236	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
   3237	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
   3238	key_flags &= ~STA_KEY_FLG_INVALID;
   3239
   3240	if (keyconf->keylen == WEP_KEY_LEN_128)
   3241		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
   3242
   3243	if (sta_id == il->hw_params.bcast_id)
   3244		key_flags |= STA_KEY_MULTICAST_MSK;
   3245
   3246	spin_lock_irqsave(&il->sta_lock, flags);
   3247
   3248	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
   3249	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
   3250	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
   3251
   3252	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
   3253
   3254	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
   3255	       keyconf->keylen);
   3256
   3257	if ((il->stations[sta_id].sta.key.
   3258	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
   3259		il->stations[sta_id].sta.key.key_offset =
   3260		    il_get_free_ucode_key_idx(il);
   3261	/* else, we are overriding an existing key => no need to allocated room
   3262	 * in uCode. */
   3263
   3264	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
   3265	     "no space for a new key");
   3266
   3267	il->stations[sta_id].sta.key.key_flags = key_flags;
   3268	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
   3269	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3270
   3271	memcpy(&sta_cmd, &il->stations[sta_id].sta,
   3272	       sizeof(struct il_addsta_cmd));
   3273	spin_unlock_irqrestore(&il->sta_lock, flags);
   3274
   3275	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
   3276}
   3277
   3278static int
   3279il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
   3280				 struct ieee80211_key_conf *keyconf, u8 sta_id)
   3281{
   3282	unsigned long flags;
   3283	__le16 key_flags = 0;
   3284	struct il_addsta_cmd sta_cmd;
   3285
   3286	lockdep_assert_held(&il->mutex);
   3287
   3288	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
   3289	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
   3290	key_flags &= ~STA_KEY_FLG_INVALID;
   3291
   3292	if (sta_id == il->hw_params.bcast_id)
   3293		key_flags |= STA_KEY_MULTICAST_MSK;
   3294
   3295	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
   3296
   3297	spin_lock_irqsave(&il->sta_lock, flags);
   3298	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
   3299	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
   3300
   3301	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
   3302
   3303	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
   3304
   3305	if ((il->stations[sta_id].sta.key.
   3306	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
   3307		il->stations[sta_id].sta.key.key_offset =
   3308		    il_get_free_ucode_key_idx(il);
   3309	/* else, we are overriding an existing key => no need to allocated room
   3310	 * in uCode. */
   3311
   3312	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
   3313	     "no space for a new key");
   3314
   3315	il->stations[sta_id].sta.key.key_flags = key_flags;
   3316	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
   3317	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3318
   3319	memcpy(&sta_cmd, &il->stations[sta_id].sta,
   3320	       sizeof(struct il_addsta_cmd));
   3321	spin_unlock_irqrestore(&il->sta_lock, flags);
   3322
   3323	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
   3324}
   3325
   3326static int
   3327il4965_set_tkip_dynamic_key_info(struct il_priv *il,
   3328				 struct ieee80211_key_conf *keyconf, u8 sta_id)
   3329{
   3330	unsigned long flags;
   3331	__le16 key_flags = 0;
   3332
   3333	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
   3334	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
   3335	key_flags &= ~STA_KEY_FLG_INVALID;
   3336
   3337	if (sta_id == il->hw_params.bcast_id)
   3338		key_flags |= STA_KEY_MULTICAST_MSK;
   3339
   3340	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
   3341	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
   3342
   3343	spin_lock_irqsave(&il->sta_lock, flags);
   3344
   3345	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
   3346	il->stations[sta_id].keyinfo.keylen = 16;
   3347
   3348	if ((il->stations[sta_id].sta.key.
   3349	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
   3350		il->stations[sta_id].sta.key.key_offset =
   3351		    il_get_free_ucode_key_idx(il);
   3352	/* else, we are overriding an existing key => no need to allocated room
   3353	 * in uCode. */
   3354
   3355	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
   3356	     "no space for a new key");
   3357
   3358	il->stations[sta_id].sta.key.key_flags = key_flags;
   3359
   3360	/* This copy is acutally not needed: we get the key with each TX */
   3361	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
   3362
   3363	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
   3364
   3365	spin_unlock_irqrestore(&il->sta_lock, flags);
   3366
   3367	return 0;
   3368}
   3369
   3370void
   3371il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
   3372		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
   3373{
   3374	u8 sta_id;
   3375	unsigned long flags;
   3376	int i;
   3377
   3378	if (il_scan_cancel(il)) {
   3379		/* cancel scan failed, just live w/ bad key and rely
   3380		   briefly on SW decryption */
   3381		return;
   3382	}
   3383
   3384	sta_id = il_sta_id_or_broadcast(il, sta);
   3385	if (sta_id == IL_INVALID_STATION)
   3386		return;
   3387
   3388	spin_lock_irqsave(&il->sta_lock, flags);
   3389
   3390	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
   3391
   3392	for (i = 0; i < 5; i++)
   3393		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
   3394		    cpu_to_le16(phase1key[i]);
   3395
   3396	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
   3397	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3398
   3399	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
   3400
   3401	spin_unlock_irqrestore(&il->sta_lock, flags);
   3402}
   3403
/*
 * il4965_remove_dynamic_key - remove a pairwise (mapping) key from the
 * station table and the uCode key table.
 *
 * Frees the ucode_key_table slot, invalidates the station's key entry,
 * and sends the updated station entry via C_ADD_STA (skipped under
 * RF-kill, where device keys are already cleared).  Caller must hold
 * il->mutex.  Returns 0 or the command error.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	/* Key id currently installed in the uCode for this station. */
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		/* Entry is already marked invalid; nothing to remove. */
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the slot in the driver's uCode key-table bitmap. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot the command so it can be sent outside the spinlock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
   3464
   3465int
   3466il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
   3467		       u8 sta_id)
   3468{
   3469	int ret;
   3470
   3471	lockdep_assert_held(&il->mutex);
   3472
   3473	il->_4965.key_mapping_keys++;
   3474	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
   3475
   3476	switch (keyconf->cipher) {
   3477	case WLAN_CIPHER_SUITE_CCMP:
   3478		ret =
   3479		    il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
   3480		break;
   3481	case WLAN_CIPHER_SUITE_TKIP:
   3482		ret =
   3483		    il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
   3484		break;
   3485	case WLAN_CIPHER_SUITE_WEP40:
   3486	case WLAN_CIPHER_SUITE_WEP104:
   3487		ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
   3488		break;
   3489	default:
   3490		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
   3491		       keyconf->cipher);
   3492		ret = -EINVAL;
   3493	}
   3494
   3495	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
   3496	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
   3497
   3498	return ret;
   3499}
   3500
   3501/*
   3502 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
   3503 *
   3504 * This adds the broadcast station into the driver's station table
   3505 * and marks it driver active, so that it will be restored to the
   3506 * device at the next best time.
   3507 */
   3508int
   3509il4965_alloc_bcast_station(struct il_priv *il)
   3510{
   3511	struct il_link_quality_cmd *link_cmd;
   3512	unsigned long flags;
   3513	u8 sta_id;
   3514
   3515	spin_lock_irqsave(&il->sta_lock, flags);
   3516	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
   3517	if (sta_id == IL_INVALID_STATION) {
   3518		IL_ERR("Unable to prepare broadcast station\n");
   3519		spin_unlock_irqrestore(&il->sta_lock, flags);
   3520
   3521		return -EINVAL;
   3522	}
   3523
   3524	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
   3525	il->stations[sta_id].used |= IL_STA_BCAST;
   3526	spin_unlock_irqrestore(&il->sta_lock, flags);
   3527
   3528	link_cmd = il4965_sta_alloc_lq(il, sta_id);
   3529	if (!link_cmd) {
   3530		IL_ERR
   3531		    ("Unable to initialize rate scaling for bcast station.\n");
   3532		return -ENOMEM;
   3533	}
   3534
   3535	spin_lock_irqsave(&il->sta_lock, flags);
   3536	il->stations[sta_id].lq = link_cmd;
   3537	spin_unlock_irqrestore(&il->sta_lock, flags);
   3538
   3539	return 0;
   3540}
   3541
   3542/*
   3543 * il4965_update_bcast_station - update broadcast station's LQ command
   3544 *
   3545 * Only used by iwl4965. Placed here to have all bcast station management
   3546 * code together.
   3547 */
   3548static int
   3549il4965_update_bcast_station(struct il_priv *il)
   3550{
   3551	unsigned long flags;
   3552	struct il_link_quality_cmd *link_cmd;
   3553	u8 sta_id = il->hw_params.bcast_id;
   3554
   3555	link_cmd = il4965_sta_alloc_lq(il, sta_id);
   3556	if (!link_cmd) {
   3557		IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
   3558		return -ENOMEM;
   3559	}
   3560
   3561	spin_lock_irqsave(&il->sta_lock, flags);
   3562	if (il->stations[sta_id].lq)
   3563		kfree(il->stations[sta_id].lq);
   3564	else
   3565		D_INFO("Bcast sta rate scaling has not been initialized.\n");
   3566	il->stations[sta_id].lq = link_cmd;
   3567	spin_unlock_irqrestore(&il->sta_lock, flags);
   3568
   3569	return 0;
   3570}
   3571
/*
 * il4965_update_bcast_stations - public wrapper refreshing the broadcast
 * station's LQ command (4965 has a single broadcast station).
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
   3577
   3578/*
   3579 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
   3580 */
   3581int
   3582il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
   3583{
   3584	unsigned long flags;
   3585	struct il_addsta_cmd sta_cmd;
   3586
   3587	lockdep_assert_held(&il->mutex);
   3588
   3589	/* Remove "disable" flag, to enable Tx for this TID */
   3590	spin_lock_irqsave(&il->sta_lock, flags);
   3591	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
   3592	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
   3593	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3594	memcpy(&sta_cmd, &il->stations[sta_id].sta,
   3595	       sizeof(struct il_addsta_cmd));
   3596	spin_unlock_irqrestore(&il->sta_lock, flags);
   3597
   3598	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
   3599}
   3600
   3601int
   3602il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
   3603			u16 ssn)
   3604{
   3605	unsigned long flags;
   3606	int sta_id;
   3607	struct il_addsta_cmd sta_cmd;
   3608
   3609	lockdep_assert_held(&il->mutex);
   3610
   3611	sta_id = il_sta_id(sta);
   3612	if (sta_id == IL_INVALID_STATION)
   3613		return -ENXIO;
   3614
   3615	spin_lock_irqsave(&il->sta_lock, flags);
   3616	il->stations[sta_id].sta.station_flags_msk = 0;
   3617	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
   3618	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
   3619	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
   3620	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3621	memcpy(&sta_cmd, &il->stations[sta_id].sta,
   3622	       sizeof(struct il_addsta_cmd));
   3623	spin_unlock_irqrestore(&il->sta_lock, flags);
   3624
   3625	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
   3626}
   3627
   3628int
   3629il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
   3630{
   3631	unsigned long flags;
   3632	int sta_id;
   3633	struct il_addsta_cmd sta_cmd;
   3634
   3635	lockdep_assert_held(&il->mutex);
   3636
   3637	sta_id = il_sta_id(sta);
   3638	if (sta_id == IL_INVALID_STATION) {
   3639		IL_ERR("Invalid station for AGG tid %d\n", tid);
   3640		return -ENXIO;
   3641	}
   3642
   3643	spin_lock_irqsave(&il->sta_lock, flags);
   3644	il->stations[sta_id].sta.station_flags_msk = 0;
   3645	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
   3646	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
   3647	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3648	memcpy(&sta_cmd, &il->stations[sta_id].sta,
   3649	       sizeof(struct il_addsta_cmd));
   3650	spin_unlock_irqrestore(&il->sta_lock, flags);
   3651
   3652	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
   3653}
   3654
   3655void
   3656il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
   3657{
   3658	unsigned long flags;
   3659
   3660	spin_lock_irqsave(&il->sta_lock, flags);
   3661	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
   3662	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
   3663	il->stations[sta_id].sta.sta.modify_mask =
   3664	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
   3665	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
   3666	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
   3667	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
   3668	spin_unlock_irqrestore(&il->sta_lock, flags);
   3669
   3670}
   3671
   3672void
   3673il4965_update_chain_flags(struct il_priv *il)
   3674{
   3675	if (il->ops->set_rxon_chain) {
   3676		il->ops->set_rxon_chain(il);
   3677		if (il->active.rx_chain != il->staging.rx_chain)
   3678			il_commit_rxon(il);
   3679	}
   3680}
   3681
   3682static void
   3683il4965_clear_free_frames(struct il_priv *il)
   3684{
   3685	struct list_head *element;
   3686
   3687	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
   3688
   3689	while (!list_empty(&il->free_frames)) {
   3690		element = il->free_frames.next;
   3691		list_del(element);
   3692		kfree(list_entry(element, struct il_frame, list));
   3693		il->frames_count--;
   3694	}
   3695
   3696	if (il->frames_count) {
   3697		IL_WARN("%d frames still in use.  Did we lose one?\n",
   3698			il->frames_count);
   3699		il->frames_count = 0;
   3700	}
   3701}
   3702
   3703static struct il_frame *
   3704il4965_get_free_frame(struct il_priv *il)
   3705{
   3706	struct il_frame *frame;
   3707	struct list_head *element;
   3708	if (list_empty(&il->free_frames)) {
   3709		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
   3710		if (!frame) {
   3711			IL_ERR("Could not allocate frame!\n");
   3712			return NULL;
   3713		}
   3714
   3715		il->frames_count++;
   3716		return frame;
   3717	}
   3718
   3719	element = il->free_frames.next;
   3720	list_del(element);
   3721	return list_entry(element, struct il_frame, list);
   3722}
   3723
/* Scrub a frame's contents and park it back on the free list so
 * il4965_get_free_frame() can hand it out again. */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
   3730
   3731static u32
   3732il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
   3733			 int left)
   3734{
   3735	lockdep_assert_held(&il->mutex);
   3736
   3737	if (!il->beacon_skb)
   3738		return 0;
   3739
   3740	if (il->beacon_skb->len > left)
   3741		return 0;
   3742
   3743	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
   3744
   3745	return il->beacon_skb->len;
   3746}
   3747
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * Each element is [id, len, data...], so hop len+2 bytes at a time;
	 * the (frame_size - 2) bound keeps the id/len pair readable. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, record its offset and its length byte in
	 * the beacon command; otherwise just warn (fields stay zeroed by
	 * the caller's memset). */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
   3775
/* Build the C_TX_BEACON command in @frame from the cached beacon skb.
 * Returns the total command size (header + beacon payload) to send, or
 * 0 on any failure (beaconing disabled, no/oversized cached beacon). */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents: copy the cached beacon into the space
	 * left in the frame union after the command header. */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (locate TIM in the copied frame) */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags: lowest PLCP rate, rotating the
	 * management TX antenna across the valid antennas; CCK flag for
	 * CCK-range rates. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
   3830
   3831int
   3832il4965_send_beacon_cmd(struct il_priv *il)
   3833{
   3834	struct il_frame *frame;
   3835	unsigned int frame_size;
   3836	int rc;
   3837
   3838	frame = il4965_get_free_frame(il);
   3839	if (!frame) {
   3840		IL_ERR("Could not obtain free frame buffer for beacon "
   3841		       "command.\n");
   3842		return -ENOMEM;
   3843	}
   3844
   3845	frame_size = il4965_hw_get_beacon_cmd(il, frame);
   3846	if (!frame_size) {
   3847		IL_ERR("Error configuring the beacon command\n");
   3848		il4965_free_frame(il, frame);
   3849		return -EINVAL;
   3850	}
   3851
   3852	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
   3853
   3854	il4965_free_frame(il, frame);
   3855
   3856	return rc;
   3857}
   3858
   3859static inline dma_addr_t
   3860il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
   3861{
   3862	struct il_tfd_tb *tb = &tfd->tbs[idx];
   3863
   3864	dma_addr_t addr = get_unaligned_le32(&tb->lo);
   3865	if (sizeof(dma_addr_t) > sizeof(u32))
   3866		addr |=
   3867		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
   3868		    16;
   3869
   3870	return addr;
   3871}
   3872
   3873static inline u16
   3874il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
   3875{
   3876	struct il_tfd_tb *tb = &tfd->tbs[idx];
   3877
   3878	return le16_to_cpu(tb->hi_n_len) >> 4;
   3879}
   3880
   3881static inline void
   3882il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
   3883{
   3884	struct il_tfd_tb *tb = &tfd->tbs[idx];
   3885	u16 hi_n_len = len << 4;
   3886
   3887	put_unaligned_le32(addr, &tb->lo);
   3888	if (sizeof(dma_addr_t) > sizeof(u32))
   3889		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
   3890
   3891	tb->hi_n_len = cpu_to_le16(hi_n_len);
   3892
   3893	tfd->num_tbs = idx + 1;
   3894}
   3895
   3896static inline u8
   3897il4965_tfd_get_num_tbs(struct il_tfd *tfd)
   3898{
   3899	return tfd->num_tbs & 0x1f;
   3900}
   3901
   3902/*
   3903 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
   3904 *
   3905 * Does NOT advance any TFD circular buffer read/write idxes
   3906 * Does NOT free the TFD itself (which is within circular buffer)
   3907 */
   3908void
   3909il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
   3910{
   3911	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
   3912	struct il_tfd *tfd;
   3913	struct pci_dev *dev = il->pci_dev;
   3914	int idx = txq->q.read_ptr;
   3915	int i;
   3916	int num_tbs;
   3917
   3918	tfd = &tfd_tmp[idx];
   3919
   3920	/* Sanity check on number of chunks */
   3921	num_tbs = il4965_tfd_get_num_tbs(tfd);
   3922
   3923	if (num_tbs >= IL_NUM_OF_TBS) {
   3924		IL_ERR("Too many chunks: %i\n", num_tbs);
   3925		/* @todo issue fatal error, it is quite serious situation */
   3926		return;
   3927	}
   3928
   3929	/* Unmap tx_cmd */
   3930	if (num_tbs)
   3931		dma_unmap_single(&dev->dev,
   3932				 dma_unmap_addr(&txq->meta[idx], mapping),
   3933				 dma_unmap_len(&txq->meta[idx], len),
   3934				 DMA_BIDIRECTIONAL);
   3935
   3936	/* Unmap chunks, if any. */
   3937	for (i = 1; i < num_tbs; i++)
   3938		dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i),
   3939				 il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
   3940
   3941	/* free SKB */
   3942	if (txq->skbs) {
   3943		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
   3944
   3945		/* can be called from irqs-disabled context */
   3946		if (skb) {
   3947			dev_kfree_skb_any(skb);
   3948			txq->skbs[txq->q.read_ptr] = NULL;
   3949		}
   3950	}
   3951}
   3952
/* Append one DMA buffer (@addr/@len) as the next TB of the TFD at the
 * queue's write pointer. @reset zeroes the TFD first (i.e. this is the
 * first buffer of a new frame). Returns 0 on success or -EINVAL when
 * the TFD already holds IL_NUM_OF_TBS buffers.
 * NOTE(review): @pad is accepted but never used in this function. */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* The hardware address space is 36 bits wide */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
   3985
   3986/*
   3987 * Tell nic where to find circular buffer of Tx Frame Descriptors for
   3988 * given Tx queue, and enable the DMA channel used for that queue.
   3989 *
   3990 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
   3991 * channels supported in hardware.
   3992 */
   3993int
   3994il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
   3995{
   3996	int txq_id = txq->q.id;
   3997
   3998	/* Circular buffer (TFD queue in DRAM) physical base address */
   3999	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
   4000
   4001	return 0;
   4002}
   4003
   4004/******************************************************************************
   4005 *
   4006 * Generic RX handler implementations
   4007 *
   4008 ******************************************************************************/
/* Handle the N_ALIVE notification from the uCode: record the alive
 * response (init vs. runtime image, chosen by ver_subtype) and, if the
 * uCode reports a valid status, schedule the matching *_alive_start
 * delayed work. */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
   4040
   4041/*
   4042 * il4965_bg_stats_periodic - Timer callback to queue stats
   4043 *
   4044 * This callback is provided in order to send a stats request.
   4045 *
   4046 * This timer function is continually reset to execute within
   4047 * 60 seconds since the last N_STATS was received.  We need to
   4048 * ensure we receive the stats in order to update the temperature
   4049 * used for calibrating the TXPOWER.
   4050 */
   4051static void
   4052il4965_bg_stats_periodic(struct timer_list *t)
   4053{
   4054	struct il_priv *il = from_timer(il, t, stats_periodic);
   4055
   4056	if (test_bit(S_EXIT_PENDING, &il->status))
   4057		return;
   4058
   4059	/* dont send host command if rf-kill is on */
   4060	if (!il_is_ready_rf(il))
   4061		return;
   4062
   4063	il_send_stats_request(il, CMD_ASYNC, false);
   4064}
   4065
/* Handle the N_BEACON notification: log the beacon TX status (debug
 * builds only) and cache the IBSS manager status from the uCode. */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
   4083
/* React to a critical-temperature (CT) kill: stop all mac80211 queues,
 * set the CT-kill-exit bit in the uCode driver GP1 register (the read
 * back flushes the write), then briefly grab and release NIC access
 * under reg_lock -- presumably to kick the NIC's power state; confirm
 * against the 4965 power-management docs. */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
   4103
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of il->status taken before we update S_RFKILL, so we
	 * can detect a state transition at the bottom. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled ... */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but clear the block again when the RXON path is not
		 * itself disabled. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* If the rfkill state changed, tell the wiphy layer; otherwise
	 * wake anyone waiting on the command queue. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
   4151
   4152/*
   4153 * il4965_setup_handlers - Initialize Rx handler callbacks
   4154 *
   4155 * Setup the RX handlers for each of the reply types sent from the uCode
   4156 * to the host.
   4157 *
   4158 * This function chains into the hardware specific files for them to setup
   4159 * any hardware specific handlers as well.
   4160 */
   4161static void
   4162il4965_setup_handlers(struct il_priv *il)
   4163{
   4164	il->handlers[N_ALIVE] = il4965_hdl_alive;
   4165	il->handlers[N_ERROR] = il_hdl_error;
   4166	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
   4167	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
   4168	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
   4169	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
   4170	il->handlers[N_BEACON] = il4965_hdl_beacon;
   4171
   4172	/*
   4173	 * The same handler is used for both the REPLY to a discrete
   4174	 * stats request from the host as well as for the periodic
   4175	 * stats notifications (after received beacons) from the uCode.
   4176	 */
   4177	il->handlers[C_STATS] = il4965_hdl_c_stats;
   4178	il->handlers[N_STATS] = il4965_hdl_stats;
   4179
   4180	il_setup_rx_scan_handlers(il);
   4181
   4182	/* status change handler */
   4183	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
   4184
   4185	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
   4186	/* Rx handlers */
   4187	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
   4188	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
   4189	il->handlers[N_RX] = il4965_hdl_rx;
   4190	/* block ack */
   4191	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
   4192	/* Tx response */
   4193	il->handlers[C_TX] = il4965_hdl_tx;
   4194}
   4195
   4196/*
   4197 * il4965_rx_handle - Main entry function for receiving responses from uCode
   4198 *
   4199 * Uses the il->handlers callback function array to invoke
   4200 * the appropriate handlers, including command responses,
   4201 * frame-received notifications, and other notifications.
   4202 */
   4203void
   4204il4965_rx_handle(struct il_priv *il)
   4205{
   4206	struct il_rx_buf *rxb;
   4207	struct il_rx_pkt *pkt;
   4208	struct il_rx_queue *rxq = &il->rxq;
   4209	u32 r, i;
   4210	int reclaim;
   4211	unsigned long flags;
   4212	u8 fill_rx = 0;
   4213	u32 count = 8;
   4214	int total_empty;
   4215
   4216	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
   4217	 * buffer that the driver may process (last buffer filled by ucode). */
   4218	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
   4219	i = rxq->read;
   4220
   4221	/* Rx interrupt, but nothing sent from uCode */
   4222	if (i == r)
   4223		D_RX("r = %d, i = %d\n", r, i);
   4224
   4225	/* calculate total frames need to be restock after handling RX */
   4226	total_empty = r - rxq->write_actual;
   4227	if (total_empty < 0)
   4228		total_empty += RX_QUEUE_SIZE;
   4229
   4230	if (total_empty > (RX_QUEUE_SIZE / 2))
   4231		fill_rx = 1;
   4232
   4233	while (i != r) {
   4234		int len;
   4235
   4236		rxb = rxq->queue[i];
   4237
   4238		/* If an RXB doesn't have a Rx queue slot associated with it,
   4239		 * then a bug has been introduced in the queue refilling
   4240		 * routines -- catch it here */
   4241		BUG_ON(rxb == NULL);
   4242
   4243		rxq->queue[i] = NULL;
   4244
   4245		dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
   4246			       PAGE_SIZE << il->hw_params.rx_page_order,
   4247			       DMA_FROM_DEVICE);
   4248		pkt = rxb_addr(rxb);
   4249
   4250		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
   4251		len += sizeof(u32);	/* account for status word */
   4252
   4253		reclaim = il_need_reclaim(il, pkt);
   4254
   4255		/* Based on type of command response or notification,
   4256		 *   handle those that need handling via function in
   4257		 *   handlers table.  See il4965_setup_handlers() */
   4258		if (il->handlers[pkt->hdr.cmd]) {
   4259			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
   4260			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
   4261			il->isr_stats.handlers[pkt->hdr.cmd]++;
   4262			il->handlers[pkt->hdr.cmd] (il, rxb);
   4263		} else {
   4264			/* No handling needed */
   4265			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
   4266			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
   4267		}
   4268
   4269		/*
   4270		 * XXX: After here, we should always check rxb->page
   4271		 * against NULL before touching it or its virtual
   4272		 * memory (pkt). Because some handler might have
   4273		 * already taken or freed the pages.
   4274		 */
   4275
   4276		if (reclaim) {
   4277			/* Invoke any callbacks, transfer the buffer to caller,
   4278			 * and fire off the (possibly) blocking il_send_cmd()
   4279			 * as we reclaim the driver command queue */
   4280			if (rxb->page)
   4281				il_tx_cmd_complete(il, rxb);
   4282			else
   4283				IL_WARN("Claim null rxb?\n");
   4284		}
   4285
   4286		/* Reuse the page if possible. For notification packets and
   4287		 * SKBs that fail to Rx correctly, add them back into the
   4288		 * rx_free list for reuse later. */
   4289		spin_lock_irqsave(&rxq->lock, flags);
   4290		if (rxb->page != NULL) {
   4291			rxb->page_dma =
   4292			    dma_map_page(&il->pci_dev->dev, rxb->page, 0,
   4293					 PAGE_SIZE << il->hw_params.rx_page_order,
   4294					 DMA_FROM_DEVICE);
   4295
   4296			if (unlikely(dma_mapping_error(&il->pci_dev->dev,
   4297						       rxb->page_dma))) {
   4298				__il_free_pages(il, rxb->page);
   4299				rxb->page = NULL;
   4300				list_add_tail(&rxb->list, &rxq->rx_used);
   4301			} else {
   4302				list_add_tail(&rxb->list, &rxq->rx_free);
   4303				rxq->free_count++;
   4304			}
   4305		} else
   4306			list_add_tail(&rxb->list, &rxq->rx_used);
   4307
   4308		spin_unlock_irqrestore(&rxq->lock, flags);
   4309
   4310		i = (i + 1) & RX_QUEUE_MASK;
   4311		/* If there are a lot of unused frames,
   4312		 * restock the Rx queue so ucode wont assert. */
   4313		if (fill_rx) {
   4314			count++;
   4315			if (count >= 8) {
   4316				rxq->read = i;
   4317				il4965_rx_replenish_now(il);
   4318				count = 0;
   4319			}
   4320		}
   4321	}
   4322
   4323	/* Backtrack one entry */
   4324	rxq->read = i;
   4325	if (fill_rx)
   4326		il4965_rx_replenish_now(il);
   4327	else
   4328		il4965_rx_queue_restock(il);
   4329}
   4330
/* call this function to flush any scheduled tasklet */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet: first let any
	 * in-flight hard IRQ handler finish, then kill the tasklet so
	 * neither can run afterwards */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
   4339
/* Bottom half of the interrupt handler: ack the pending CSR_INT and
 * CSR_FH_INT_STATUS bits, then service each discovered interrupt cause
 * (HW error, rf-kill, CT-kill, SW error, wakeup, RX, uCode-load TX)
 * and re-enable interrupts if they were only disabled by the IRQ. */
static void
il4965_irq_tasklet(struct tasklet_struct *t)
{
	struct il_priv *il = from_tasklet(il, t, irq_tasklet);
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Bail out early: interrupts stay disabled; recovery is
		 * handled by il_irq_handle_error() above. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
   4525
   4526/*****************************************************************************
   4527 *
   4528 * sysfs attributes
   4529 *
   4530 *****************************************************************************/
   4531
   4532#ifdef CONFIG_IWLEGACY_DEBUG
   4533
   4534/*
   4535 * The following adds a new attribute to the sysfs representation
   4536 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
   4537 * used for controlling the debug level.
   4538 *
   4539 * See the level definitions in iwl for details.
   4540 *
   4541 * The debug_level being managed using sysfs below is a per device debug
   4542 * level that is used instead of the global debug level if it (the per
   4543 * device debug level) is set.
   4544 */
   4545static ssize_t
   4546il4965_show_debug_level(struct device *d, struct device_attribute *attr,
   4547			char *buf)
   4548{
   4549	struct il_priv *il = dev_get_drvdata(d);
   4550	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
   4551}
   4552
   4553static ssize_t
   4554il4965_store_debug_level(struct device *d, struct device_attribute *attr,
   4555			 const char *buf, size_t count)
   4556{
   4557	struct il_priv *il = dev_get_drvdata(d);
   4558	unsigned long val;
   4559	int ret;
   4560
   4561	ret = kstrtoul(buf, 0, &val);
   4562	if (ret)
   4563		IL_ERR("%s is not in hex or decimal form.\n", buf);
   4564	else
   4565		il->debug_level = val;
   4566
   4567	return strnlen(buf, count);
   4568}
   4569
   4570static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
   4571		   il4965_store_debug_level);
   4572
   4573#endif /* CONFIG_IWLEGACY_DEBUG */
   4574
   4575static ssize_t
   4576il4965_show_temperature(struct device *d, struct device_attribute *attr,
   4577			char *buf)
   4578{
   4579	struct il_priv *il = dev_get_drvdata(d);
   4580
   4581	if (!il_is_alive(il))
   4582		return -EAGAIN;
   4583
   4584	return sprintf(buf, "%d\n", il->temperature);
   4585}
   4586
   4587static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
   4588
   4589static ssize_t
   4590il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
   4591{
   4592	struct il_priv *il = dev_get_drvdata(d);
   4593
   4594	if (!il_is_ready_rf(il))
   4595		return sprintf(buf, "off\n");
   4596	else
   4597		return sprintf(buf, "%d\n", il->tx_power_user_lmt);
   4598}
   4599
   4600static ssize_t
   4601il4965_store_tx_power(struct device *d, struct device_attribute *attr,
   4602		      const char *buf, size_t count)
   4603{
   4604	struct il_priv *il = dev_get_drvdata(d);
   4605	unsigned long val;
   4606	int ret;
   4607
   4608	ret = kstrtoul(buf, 10, &val);
   4609	if (ret)
   4610		IL_INFO("%s is not in decimal form.\n", buf);
   4611	else {
   4612		ret = il_set_tx_power(il, val, false);
   4613		if (ret)
   4614			IL_ERR("failed setting tx power (0x%08x).\n", ret);
   4615		else
   4616			ret = count;
   4617	}
   4618	return ret;
   4619}
   4620
   4621static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
   4622		   il4965_store_tx_power);
   4623
/*
 * sysfs attributes exported under the PCI device directory.
 * debug_level is only present on CONFIG_IWLEGACY_DEBUG builds.
 */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};
   4632
/* Group registered by il4965_ucode_callback(); .name == NULL places the
 * files directly in the device directory rather than a subdirectory. */
static const struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
   4637
   4638/******************************************************************************
   4639 *
   4640 * uCode download functions
   4641 *
   4642 ******************************************************************************/
   4643
/*
 * Release the six DMA-coherent buffers holding the uCode images
 * (runtime inst/data + data backup, init inst/data, bootstrap).
 * Counterpart of the il_alloc_fw_desc() calls made in
 * il4965_ucode_callback().
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
   4654
/* Clear every reset bit in CSR_RESET so the NIC can start operating. */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
   4661
   4662static void il4965_ucode_callback(const struct firmware *ucode_raw,
   4663				  void *context);
   4664static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
   4665
   4666static int __must_check
   4667il4965_request_firmware(struct il_priv *il, bool first)
   4668{
   4669	const char *name_pre = il->cfg->fw_name_pre;
   4670	char tag[8];
   4671
   4672	if (first) {
   4673		il->fw_idx = il->cfg->ucode_api_max;
   4674		sprintf(tag, "%d", il->fw_idx);
   4675	} else {
   4676		il->fw_idx--;
   4677		sprintf(tag, "%d", il->fw_idx);
   4678	}
   4679
   4680	if (il->fw_idx < il->cfg->ucode_api_min) {
   4681		IL_ERR("no suitable firmware found!\n");
   4682		return -ENOENT;
   4683	}
   4684
   4685	sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
   4686
   4687	D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
   4688
   4689	return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
   4690				       &il->pci_dev->dev, GFP_KERNEL, il,
   4691				       il4965_ucode_callback);
   4692}
   4693
/*
 * Pointers to (and sizes of) the five images packed inside a raw uCode
 * file: runtime inst/data, init inst/data, and bootstrap.  Filled in by
 * il4965_load_firmware(); the pointers reference ucode_raw->data, so
 * they are only valid until release_firmware() is called on the blob.
 */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
   4698
/*
 * Parse the raw uCode file header and locate the five image pieces.
 *
 * Reads the header version, extracts the per-image sizes, verifies
 * that the file length matches the sum of header + all images, and
 * sets the piece pointers into ucode_raw->data.
 *
 * Returns 0 on success, -EINVAL if the file is too small or its total
 * size does not match the sizes advertised in its header.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	/* All API versions handled by this driver use the v1 header layout;
	 * unknown versions deliberately fall through to the same parsing. */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Images are packed back-to-back after the header, in this order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
   4752
   4753/*
   4754 * il4965_ucode_callback - callback when firmware was loaded
   4755 *
   4756 * If loaded successfully, copies the firmware into buffers
   4757 * for the card to fetch (via DMA).
   4758 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* NULL ucode_raw means the request itself failed; try the next
	 * (lower) API version, if any remain. */
	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE:  Copy into backup buffer will be done in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	il_dbgfs_register(il, DRV_NAME);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, if any */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	/* Fatal: give up on this device entirely and detach the driver. */
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
   4998
/*
 * Names for low-numbered uCode error codes; the error code itself is
 * used as the index for codes below ARRAY_SIZE(desc_lookup_text)
 * (see il4965_desc_lookup()).  Higher codes go through advanced_lookup.
 */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
   5029
   5030static struct {
   5031	char *name;
   5032	u8 num;
   5033} advanced_lookup[] = {
   5034	{
   5035	"NMI_INTERRUPT_WDG", 0x34}, {
   5036	"SYSASSERT", 0x35}, {
   5037	"UCODE_VERSION_MISMATCH", 0x37}, {
   5038	"BAD_COMMAND", 0x38}, {
   5039	"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
   5040	"FATAL_ERROR", 0x3D}, {
   5041	"NMI_TRM_HW_ERR", 0x46}, {
   5042	"NMI_INTERRUPT_TRM", 0x4C}, {
   5043	"NMI_INTERRUPT_BREAK_POINT", 0x54}, {
   5044	"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
   5045	"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
   5046	"NMI_INTERRUPT_HOST", 0x66}, {
   5047	"NMI_INTERRUPT_ACTION_PT", 0x7C}, {
   5048	"NMI_INTERRUPT_UNKNOWN", 0x84}, {
   5049	"NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
   5050"ADVANCED_SYSASSERT", 0},};
   5051
   5052static const char *
   5053il4965_desc_lookup(u32 num)
   5054{
   5055	int i;
   5056	int max = ARRAY_SIZE(desc_lookup_text);
   5057
   5058	if (num < max)
   5059		return desc_lookup_text[num];
   5060
   5061	max = ARRAY_SIZE(advanced_lookup) - 1;
   5062	for (i = 0; i < max; i++) {
   5063		if (advanced_lookup[i].num == num)
   5064			break;
   5065	}
   5066	return advanced_lookup[i].name;
   5067}
   5068
   5069#define ERROR_START_OFFSET  (1 * sizeof(u32))
   5070#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
   5071
/*
 * Dump the uCode's error event table from device (target) memory.
 *
 * The table base pointer comes from the relevant "alive" response
 * (init or runtime uCode).  Fields are read at fixed 32-bit word
 * offsets from that base: word 0 = entry count, 1 = error desc,
 * 2 = pc, 3-4 = blink1/2, 5-6 = ilink1/2, 7-8 = data1/2, 9 = line,
 * 11 = timestamp, 22 = last host command.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	/* Bail out if the uCode handed us a bogus table pointer. */
	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
   5119
/*
 * Configure the uCode's critical-temperature (CT) kill threshold.
 * Clears the CT-kill-exit flag under il->lock, then sends the
 * C_CT_KILL_CONFIG command with the threshold from hw_params.
 *
 * NOTE(review): cmd is not zero-initialized before the single field
 * assignment; if struct il_ct_kill_config carries any other fields or
 * padding, stack garbage would be sent to the uCode — confirm the
 * struct layout.
 */
static void
il4965_rf_kill_ct_config(struct il_priv *il)
{
	struct il_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&il->lock, flags);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&il->lock, flags);

	cmd.critical_temperature_R =
	    cpu_to_le32(il->hw_params.ct_kill_threshold);

	ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
	if (ret)
		IL_ERR("C_CT_KILL_CONFIG failed\n");
	else
		D_INFO("C_CT_KILL_CONFIG " "succeeded, "
		       "critical temperature is %d\n",
		       il->hw_params.ct_kill_threshold);
}
   5143
/*
 * Default Tx queue -> FIFO mapping applied in il4965_alive_notify():
 * queues 0-3 carry VO/VI/BE/BK, queue 4 is the command queue, and
 * queues 5-6 stay unused.  Must have exactly 7 entries (enforced by
 * a BUILD_BUG_ON at the point of use).
 */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
   5153
   5154#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
   5155
/*
 * il4965_alive_notify - program the Tx scheduler after runtime "alive"
 *
 * Clears the 4965's scheduler context SRAM, points the scheduler at the
 * DRAM byte-count tables, enables all Tx DMA channels, initializes each
 * Tx queue's window size and frame limit, then maps every queue to its
 * FIFO per default_queue_to_tx_fifo.  Runs entirely under il->lock.
 * Always returns 0.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Interrupt the host for every queue (one bit per queue). */
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
   5255
   5256/*
   5257 * il4965_alive_start - called after N_ALIVE notification received
   5258 *                   from protocol/runtime uCode (initialization uCode's
   5259 *                   Alive gets handled by il_init_alive_start()).
   5260 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	/* Program the Tx scheduler / DMA channels. */
	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	/* With rfkill asserted, stop here; queues stay stopped. */
	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* apply any changes in staging */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	/* Any failure above schedules a full driver restart. */
	queue_work(il->workqueue, &il->restart);
}
   5342
   5343static void il4965_cancel_deferred_work(struct il_priv *il);
   5344
/*
 * Core teardown path (caller holds il->mutex via il4965_down()).
 * Cancels scans, stops the watchdog, flushes station/key state, quiesces
 * interrupts and, if the device was ever initialized, stops the Tx/Rx
 * contexts and powers the NIC down — preserving only a few status bits
 * (RF kill, geo-configured, exit-pending, and FW-error when applicable).
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* FIXME: race conditions ? */
	spin_lock_irq(&il->sta_lock);
	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * We disabled and synchronized interrupt, and priv->mutex is taken, so
	 * here is the only thread which will program device registers, but
	 * still have lockdep assertions, so we are taking reg_lock.
	 */
	spin_lock_irq(&il->reg_lock);
	/* FIXME: il_grab_nic_access if rfkill is off ? */

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);
	/* Power-down device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);
	/* Make sure (redundant) we've released our request to stay awake */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* Stop the device, and put it in low power state */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
   5447
/*
 * Take the device down: run __il4965_down() under il->mutex, then
 * cancel deferred work outside the mutex (the work items themselves
 * may need to acquire it).
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
   5457
   5458
/*
 * Request NIC_READY in CSR_HW_IF_CONFIG_REG and poll (up to 100 us)
 * for the hardware to acknowledge it; on success, set il->hw_ready.
 */
static void
il4965_set_hw_ready(struct il_priv *il)
{
	int ret;

	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   100);
	/* A non-negative poll result means the bit was observed set. */
	if (ret >= 0)
		il->hw_ready = true;

	D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
}
   5477
/*
 * Make sure the card's "HW ready" handshake is complete.
 *
 * First try the plain ready handshake; if that fails, assert the
 * PREPARE bit and wait (up to 150 ms) for PREPARE_DONE to clear, then
 * retry the ready handshake.  The result is reported via il->hw_ready.
 */
static void
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret;

	il->hw_ready = false;

	il4965_set_hw_ready(il);
	if (il->hw_ready)
		return;

	/* If HW is not ready, prepare the conditions to check again */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	/* Wait for NIC_PREPARE_DONE to be cleared (inverted mask poll). */
	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);
}
   5501
   5502#define MAX_HW_RESTARTS 5
   5503
   5504static int
   5505__il4965_up(struct il_priv *il)
   5506{
   5507	int i;
   5508	int ret;
   5509
   5510	if (test_bit(S_EXIT_PENDING, &il->status)) {
   5511		IL_WARN("Exit pending; will not bring the NIC up\n");
   5512		return -EIO;
   5513	}
   5514
   5515	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
   5516		IL_ERR("ucode not available for device bringup\n");
   5517		return -EIO;
   5518	}
   5519
   5520	ret = il4965_alloc_bcast_station(il);
   5521	if (ret) {
   5522		il_dealloc_bcast_stations(il);
   5523		return ret;
   5524	}
   5525
   5526	il4965_prepare_card_hw(il);
   5527	if (!il->hw_ready) {
   5528		il_dealloc_bcast_stations(il);
   5529		IL_ERR("HW not ready\n");
   5530		return -EIO;
   5531	}
   5532
   5533	/* If platform's RF_KILL switch is NOT set to KILL */
   5534	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
   5535		clear_bit(S_RFKILL, &il->status);
   5536	else {
   5537		set_bit(S_RFKILL, &il->status);
   5538		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
   5539
   5540		il_dealloc_bcast_stations(il);
   5541		il_enable_rfkill_int(il);
   5542		IL_WARN("Radio disabled by HW RF Kill switch\n");
   5543		return 0;
   5544	}
   5545
   5546	_il_wr(il, CSR_INT, 0xFFFFFFFF);
   5547
   5548	/* must be initialised before il_hw_nic_init */
   5549	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
   5550
   5551	ret = il4965_hw_nic_init(il);
   5552	if (ret) {
   5553		IL_ERR("Unable to init nic\n");
   5554		il_dealloc_bcast_stations(il);
   5555		return ret;
   5556	}
   5557
   5558	/* make sure rfkill handshake bits are cleared */
   5559	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
   5560	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
   5561
   5562	/* clear (again), then enable host interrupts */
   5563	_il_wr(il, CSR_INT, 0xFFFFFFFF);
   5564	il_enable_interrupts(il);
   5565
   5566	/* really make sure rfkill handshake bits are cleared */
   5567	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
   5568	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
   5569
   5570	/* Copy original ucode data image from disk into backup cache.
   5571	 * This will be used to initialize the on-board processor's
   5572	 * data SRAM for a clean start when the runtime program first loads. */
   5573	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
   5574	       il->ucode_data.len);
   5575
   5576	for (i = 0; i < MAX_HW_RESTARTS; i++) {
   5577
   5578		/* load bootstrap state machine,
   5579		 * load bootstrap program into processor's memory,
   5580		 * prepare to load the "initialize" uCode */
   5581		ret = il->ops->load_ucode(il);
   5582
   5583		if (ret) {
   5584			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
   5585			continue;
   5586		}
   5587
   5588		/* start card; "initialize" will load runtime ucode */
   5589		il4965_nic_start(il);
   5590
   5591		D_INFO(DRV_NAME " is coming up\n");
   5592
   5593		return 0;
   5594	}
   5595
   5596	set_bit(S_EXIT_PENDING, &il->status);
   5597	__il4965_down(il);
   5598	clear_bit(S_EXIT_PENDING, &il->status);
   5599
   5600	/* tried to restart and config the device for as long as our
   5601	 * patience could withstand */
   5602	IL_ERR("Unable to initialize device after %d attempts.\n", i);
   5603	return -EIO;
   5604}
   5605
   5606/*****************************************************************************
   5607 *
   5608 * Workqueue callbacks
   5609 *
   5610 *****************************************************************************/
   5611
   5612static void
   5613il4965_bg_init_alive_start(struct work_struct *data)
   5614{
   5615	struct il_priv *il =
   5616	    container_of(data, struct il_priv, init_alive_start.work);
   5617
   5618	mutex_lock(&il->mutex);
   5619	if (test_bit(S_EXIT_PENDING, &il->status))
   5620		goto out;
   5621
   5622	il->ops->init_alive_start(il);
   5623out:
   5624	mutex_unlock(&il->mutex);
   5625}
   5626
   5627static void
   5628il4965_bg_alive_start(struct work_struct *data)
   5629{
   5630	struct il_priv *il =
   5631	    container_of(data, struct il_priv, alive_start.work);
   5632
   5633	mutex_lock(&il->mutex);
   5634	if (test_bit(S_EXIT_PENDING, &il->status))
   5635		goto out;
   5636
   5637	il4965_alive_start(il);
   5638out:
   5639	mutex_unlock(&il->mutex);
   5640}
   5641
   5642static void
   5643il4965_bg_run_time_calib_work(struct work_struct *work)
   5644{
   5645	struct il_priv *il = container_of(work, struct il_priv,
   5646					  run_time_calib_work);
   5647
   5648	mutex_lock(&il->mutex);
   5649
   5650	if (test_bit(S_EXIT_PENDING, &il->status) ||
   5651	    test_bit(S_SCANNING, &il->status)) {
   5652		mutex_unlock(&il->mutex);
   5653		return;
   5654	}
   5655
   5656	if (il->start_calib) {
   5657		il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
   5658		il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
   5659	}
   5660
   5661	mutex_unlock(&il->mutex);
   5662}
   5663
/*
 * Deferred work: restart the device.
 *
 * Two distinct paths:
 *  - firmware error: take the device down and hand recovery to
 *    mac80211 via ieee80211_restart_hw();
 *  - otherwise: full down/up cycle driven by this driver.
 *
 * Note the lock ordering: il4965_cancel_deferred_work() is called
 * outside il->mutex because the cancelled handlers take the mutex.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		/* mac80211 will call our start() callback again. */
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		/* Re-check under the mutex: a shutdown may have raced in
		 * while the device was being taken down. */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
   5694
   5695static void
   5696il4965_bg_rx_replenish(struct work_struct *data)
   5697{
   5698	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
   5699
   5700	if (test_bit(S_EXIT_PENDING, &il->status))
   5701		return;
   5702
   5703	mutex_lock(&il->mutex);
   5704	il4965_rx_replenish(il);
   5705	mutex_unlock(&il->mutex);
   5706}
   5707
   5708/*****************************************************************************
   5709 *
   5710 * mac80211 entry point functions
   5711 *
   5712 *****************************************************************************/
   5713
   5714#define UCODE_READY_TIMEOUT	(4 * HZ)
   5715
   5716/*
   5717 * Not a mac80211 entry point function, but it fits in with all the
   5718 * other mac80211 functions grouped here.
   5719 */
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Advertise the driver's capabilities to mac80211 and register the
 * hardware.  @max_probe_length is the largest probe request the uCode
 * accepts; the usable IE space is that minus the 802.11 header (24)
 * and a zero-length SSID element (2).
 *
 * Returns 0 on success or the error from ieee80211_register_hw().
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	/* SMPS features only make sense on 11n-capable SKUs. */
	if (il->cfg->sku & IL_SKU_N)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only expose bands for which the EEPROM enabled channels. */
	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];
	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
   5785
/*
 * mac80211 start callback: bring the NIC up and wait for the runtime
 * uCode to report S_READY.
 *
 * If the radio is RF-killed, the interface is still marked open and 0
 * is returned; bring-up completes later when the kill switch flips.
 *
 * NOTE(review): on START_ALIVE timeout this returns -ETIMEDOUT
 * without explicitly tearing down what __il4965_up() set up --
 * presumably mac80211's error path / a later stop() handles that;
 * confirm before relying on it.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Timed out -- re-check the bit in case READY raced in
		 * just after the wait expired. */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
   5827
/*
 * mac80211 stop callback: take the device down, flush the driver
 * workqueue, and re-arm the RF-kill interrupt so user space still
 * sees kill-switch changes while the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	/* Wait for any still-queued work to finish before returning. */
	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
   5851
/*
 * mac80211 tx callback: hand the frame to the 4965 TX path.
 * On failure the skb is freed here, as mac80211 expects the driver
 * to consume the frame either way.
 */
void
il4965_mac_tx(struct ieee80211_hw *hw,
	      struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il4965_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
   5869
/*
 * mac80211 callback: push a freshly derived TKIP phase-1 key to the
 * hardware.  Thin wrapper around il4965_update_tkip_key().
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
   5883
/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 *
 * Keys are refused when software crypto is forced via module
 * parameter, and for IBSS group keys (IBSS RSN handles those in
 * software).  Legacy WEP group keys (no key-mapping keys seen) use a
 * dedicated "default WEP key" uCode command; everything else goes
 * through the dynamic key path.
 *
 * Returns 0 on success or a negative errno.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta means a group key: use the broadcast station entry. */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	/* Key commands and scanning cannot run concurrently. */
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
   5959
/*
 * mac80211 ampdu_action callback: start/stop RX and TX aggregation
 * sessions for a (station, TID) pair.
 *
 * Returns -EACCES on non-11n SKUs.  For the stop actions, errors are
 * masked to 0 when a driver shutdown is pending -- the session is
 * going away regardless and mac80211 must not treat it as a failure.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_ampdu_params *params)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Nothing to program; scheduler is already set up. */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
   6009
   6010int
   6011il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
   6012		   struct ieee80211_sta *sta)
   6013{
   6014	struct il_priv *il = hw->priv;
   6015	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
   6016	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
   6017	int ret;
   6018	u8 sta_id;
   6019
   6020	D_INFO("received request to add station %pM\n", sta->addr);
   6021	mutex_lock(&il->mutex);
   6022	D_INFO("proceeding to add station %pM\n", sta->addr);
   6023	sta_priv->common.sta_id = IL_INVALID_STATION;
   6024
   6025	atomic_set(&sta_priv->pending_frames, 0);
   6026
   6027	ret =
   6028	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
   6029	if (ret) {
   6030		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
   6031		/* Should we return success if return code is EEXIST ? */
   6032		mutex_unlock(&il->mutex);
   6033		return ret;
   6034	}
   6035
   6036	sta_priv->common.sta_id = sta_id;
   6037
   6038	/* Initialize rate scaling */
   6039	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
   6040	il4965_rs_rate_init(il, sta, sta_id);
   6041	mutex_unlock(&il->mutex);
   6042
   6043	return 0;
   6044}
   6045
/*
 * mac80211 channel_switch callback: program a CSA channel switch.
 *
 * Bails out silently (switch simply does not happen) when RF-killed,
 * shutting down, scanning, a switch is already pending, not
 * associated, the op is unsupported, the target channel equals the
 * current one, or the channel is invalid.  On failure of the uCode
 * command, the pending state is rolled back and mac80211 is told the
 * switch did not complete.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	/* il->lock protects the staging RXON config below. */
	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Reset band-dependent flags when actually changing channel. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		/* uCode command failed: undo pending state and tell
		 * mac80211 the switch did not happen. */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
   6132
/*
 * mac80211 configure_filter callback: translate FIF_* filter flags
 * into RXON filter bits on the staging config.
 *
 * The change is not committed to the device here; it is picked up by
 * the next RXON commit.  On return, *total_flags is masked down to
 * the subset of filters this hardware actually honours.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate RXON bits to set (filter_or) or clear (filter_nand)
 * depending on whether the FIF_* flag is requested. */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
   6179
   6180/*****************************************************************************
   6181 *
   6182 * driver setup and teardown
   6183 *
   6184 *****************************************************************************/
   6185
   6186static void
   6187il4965_bg_txpower_work(struct work_struct *work)
   6188{
   6189	struct il_priv *il = container_of(work, struct il_priv,
   6190					  txpower_work);
   6191
   6192	mutex_lock(&il->mutex);
   6193
   6194	/* If a scan happened to start before we got here
   6195	 * then just return; the stats notification will
   6196	 * kick off another scheduled work to compensate for
   6197	 * any temperature delta we missed here. */
   6198	if (test_bit(S_EXIT_PENDING, &il->status) ||
   6199	    test_bit(S_SCANNING, &il->status))
   6200		goto out;
   6201
   6202	/* Regardless of if we are associated, we must reconfigure the
   6203	 * TX power since frames can be sent on non-radar channels while
   6204	 * not associated */
   6205	il->ops->send_tx_power(il);
   6206
   6207	/* Update last_temperature to keep is_calib_needed from running
   6208	 * when it isn't needed... */
   6209	il->last_temperature = il->temperature;
   6210out:
   6211	mutex_unlock(&il->mutex);
   6212}
   6213
/*
 * Set up the driver's workqueue, work items, timers, and the IRQ
 * tasklet.  Counterpart of il4965_cancel_deferred_work().
 *
 * NOTE(review): the create_singlethread_workqueue() return value is
 * not checked here; callers appear to assume it succeeds -- confirm
 * the probe path tolerates a NULL workqueue before relying on it.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);

	timer_setup(&il->watchdog, il_bg_watchdog, 0);

	tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
}
   6237
/*
 * Cancel the deferred work set up in il4965_setup_deferred_work().
 *
 * Note: alive_start is cancelled asynchronously (no _sync) while the
 * other items use the synchronous variants; its handler takes
 * il->mutex and checks S_EXIT_PENDING itself if it still runs.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
   6250
   6251static void
   6252il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
   6253{
   6254	int i;
   6255
   6256	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
   6257		rates[i].bitrate = il_rates[i].ieee * 5;
   6258		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
   6259		rates[i].hw_value_short = i;
   6260		rates[i].flags = 0;
   6261		if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
   6262			/*
   6263			 * If CCK != 1M then set short preamble rate flag.
   6264			 */
   6265			rates[i].flags |=
   6266			    (il_rates[i].plcp ==
   6267			     RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
   6268		}
   6269	}
   6270}
   6271
   6272/*
   6273 * Acquire il->lock before calling this function !
   6274 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	/* Tell the device the new write pointer (byte 0 = idx,
	 * byte 1 = queue id), then sync the scheduler's read pointer. */
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
   6281
/*
 * Program the scheduler status register for a TX queue: bind it to a
 * TX FIFO and select aggregation ("scd_retry"/BA) vs normal (AC)
 * operation.  The queue is marked active only if it is set in
 * il->txq_ctx_active_msk.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
   6304
/* mac80211 callback table for the 4965: device-specific handlers are
 * the il4965_* functions above; the remaining ops are shared iwlegacy
 * (il_*) implementations from common.c. */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
   6327
/*
 * Initialize driver-private state: locks, defaults, the regulatory
 * channel map, geo (band) structures, and the legacy rate table.
 *
 * Returns 0 on success; on failure everything already allocated here
 * is freed again (goto-cleanup).  Counterpart: il4965_uninit_drv().
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = NL80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	/* il_init_geos() allocated il->ieee_rates; fill it in now. */
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
   6377
/*
 * Free driver-private state allocated by il4965_init_drv() plus the
 * scan command buffer (allocated lazily by the scan code).
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
   6385
/*
 * Read and cache the hardware revision registers and the PCI
 * revision ID for later feature/workaround decisions.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
   6394
/* 4965-specific bounds for the runtime sensitivity/auto-correlation
 * calibration (consumed via il->hw_params.sens). */
static const struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
   6421
/*
 * Populate il->hw_params with 4965 hardware limits, applying the
 * amsdu_size_8K / disable_11n / num_of_queues module parameters.
 *
 * NOTE(review): this writes through il->cfg (sku, num_of_queues),
 * mutating the config structure that PCI probe took from the device
 * table -- verify the cfg is per-device before relying on it with
 * multiple adapters.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* 8K RX buffers allow receiving large A-MSDUs. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honour the num_of_queues module parameter when in range. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is supported on the 5 GHz band only. */
	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	   celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
   6469
/*
 * il4965_pci_probe - PCI probe entry point for 4965 devices
 *
 * Brings the device up in numbered stages (see section comments) and
 * unwinds via the goto ladder at the bottom on any failure.  Firmware
 * loading is asynchronous: probe returns 0 once the request is queued,
 * and il4965_pci_remove() waits on firmware_loading_complete.
 *
 * Returns 0 on success or a negative errno.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	/* driver_data from the PCI ID table carries the matching il_cfg */
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/

	/* mac80211 allocates ieee80211_hw plus our il_priv as hw->priv */
	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	/* NOTE(review): ASPM L0s/L1 and clock PM are disabled up front --
	 * presumably a HW limitation of this generation; confirm against
	 * the original driver history before changing. */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Try 36-bit DMA addressing first, then fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	/* Map BAR 0 (device CSR/register space) */
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	/* prepare_card_hw sets il->hw_ready as a side effect */
	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il
	 *******************/

	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and il are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	/* Keep interrupts masked until the IRQ handler is installed */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*********************************************
	 * 8. Enable interrupts and read RFKILL state
	 *********************************************/

	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	/* remove() waits on this; completed by the async firmware callback */
	init_completion(&il->_4965.firmware_loading_complete);

	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	/* workqueue was created by il4965_setup_deferred_work() above */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
   6675
/*
 * il4965_pci_remove - PCI remove entry point; full teardown of the device
 *
 * Mirrors il4965_pci_probe() in reverse.  The teardown order is
 * deliberate; see the inline comments before reordering anything.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* probe may have returned while firmware load is still in flight;
	 * don't tear anything down until that async path has finished */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw will cause il_mac_stop and il4965_down
	 * to be called; since we are removing the device we need to set
	 * the S_EXIT_PENDING bit first.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	/* If mac80211 registration never happened, bring the HW down
	 * ourselves instead of relying on the unregister path. */
	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	/* rxq.bd is NULL if the rx queue was never allocated */
	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il4965_uninit_drv(il);

	/* dev_kfree_skb() is NULL-safe */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
   6753
/*
 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Writes the scheduler TXFACT register; each bit in @mask enables one
 * Tx FIFO channel.
 * Must be called under il->lock and mac access.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
   6763
   6764/*****************************************************************************
   6765 *
   6766 * driver and module entry point
   6767 *
   6768 *****************************************************************************/
   6769
/* Hardware specific file defines the PCI IDs table for that hardware module */
/* Both 4965AGN device IDs (0x4229, 0x4230) share the same il4965_cfg. */
static const struct pci_device_id il4965_hw_card_ids[] = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* sentinel */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
   6777
/* PCI driver glue; PM callbacks come from the shared iwlegacy core. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
   6785
   6786static int __init
   6787il4965_init(void)
   6788{
   6789
   6790	int ret;
   6791	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
   6792	pr_info(DRV_COPYRIGHT "\n");
   6793
   6794	ret = il4965_rate_control_register();
   6795	if (ret) {
   6796		pr_err("Unable to register rate control algorithm: %d\n", ret);
   6797		return ret;
   6798	}
   6799
   6800	ret = pci_register_driver(&il4965_driver);
   6801	if (ret) {
   6802		pr_err("Unable to initialize PCI module\n");
   6803		goto error_register;
   6804	}
   6805
   6806	return ret;
   6807
   6808error_register:
   6809	il4965_rate_control_unregister();
   6810	return ret;
   6811}
   6812
/*
 * il4965_exit - module exit point
 *
 * Unregisters the PCI driver before the rate control algorithm, the
 * reverse of il4965_init(), so no device can still be using the
 * algorithm when it goes away.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
   6819
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* debug is the only runtime-writable parameter (0644) */
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* The remaining parameters back il4965_mod_params and are read-only
 * after module load (0444); they are consumed during device probe. */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");