cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rtllib_tx.c (26906B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
      4 *
      5 * Contact Information:
      6 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
      7 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
      8 *
      9 * Few modifications for Realtek's Wi-Fi drivers by
     10 * Andrea Merello <andrea.merello@gmail.com>
     11 *
     12 * A special thanks goes to Realtek for their support!
     13 */
     14#include <linux/compiler.h>
     15#include <linux/errno.h>
     16#include <linux/if_arp.h>
     17#include <linux/in6.h>
     18#include <linux/in.h>
     19#include <linux/ip.h>
     20#include <linux/kernel.h>
     21#include <linux/module.h>
     22#include <linux/netdevice.h>
     23#include <linux/pci.h>
     24#include <linux/proc_fs.h>
     25#include <linux/skbuff.h>
     26#include <linux/slab.h>
     27#include <linux/tcp.h>
     28#include <linux/types.h>
     29#include <linux/wireless.h>
     30#include <linux/etherdevice.h>
     31#include <linux/uaccess.h>
     32#include <linux/if_vlan.h>
     33
     34#include "rtllib.h"
     35
     36/* 802.11 Data Frame
     37 *
     38 *
     39 * 802.11 frame_control for data frames - 2 bytes
     40 *      ,--------------------------------------------------------------------.
     41 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |  9 |  a |  b  |  c  |  d  | e  |
     42 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
     43 * val  | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 |  0 |  x |  x  |  x  |  x  | x  |
     44 *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
     45 * desc |  ver  | type  |  ^-subtype-^  |to |from|more|retry| pwr |more |wep |
     46 *      |       |       | x=0 data      |DS | DS |frag|     | mgm |data |    |
     47 *      |       |       | x=1 data+ack  |   |    |    |     |     |     |    |
     48 *      '--------------------------------------------------------------------'
     49 *                                           /\
     50 *                                           |
     51 * 802.11 Data Frame                         |
     52 *          ,--------- 'ctrl' expands to >---'
     53 *          |
     54 *       ,--'---,-------------------------------------------------------------.
     55 * Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
     56 *       |------|------|---------|---------|---------|------|---------|------|
     57 * Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
     58 *       |      | tion | (BSSID) |         |         | ence |  data   |      |
     59 *       `--------------------------------------------------|         |------'
     60 * Total: 28 non-data bytes                                 `----.----'
     61 *                                                               |
     62 *        .- 'Frame data' expands to <---------------------------'
     63 *        |
     64 *        V
     65 *       ,---------------------------------------------------.
     66 * Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
     67 *       |------|------|---------|----------|------|---------|
     68 * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
     69 *       | DSAP | SSAP |         |          |      | Packet  |
     70 *       | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
     71 *       `-----------------------------------------|         |
     72 * Total: 8 non-data bytes                         `----.----'
     73 *                                                      |
     74 *        .- 'IP Packet' expands, if WEP enabled, to <--'
     75 *        |
     76 *        V
     77 *       ,-----------------------.
     78 * Bytes |  4  |   0-2296  |  4  |
     79 *       |-----|-----------|-----|
     80 * Desc. | IV  | Encrypted | ICV |
     81 *       |     | IP Packet |     |
     82 *       `-----------------------'
     83 * Total: 8 non-data bytes
     84 *
     85 *
     86 * 802.3 Ethernet Data Frame
     87 *
     88 *       ,-----------------------------------------.
     89 * Bytes |   6   |   6   |  2   |  Variable |   4  |
     90 *       |-------|-------|------|-----------|------|
     91 * Desc. | Dest. | Source| Type | IP Packet |  fcs |
     92 *       |  MAC  |  MAC  |      |	   |      |
     93 *       `-----------------------------------------'
     94 * Total: 18 non-data bytes
     95 *
     96 * In the event that fragmentation is required, the incoming payload is split
     97 * into N parts of size ieee->fts.  The first fragment contains the SNAP header
     98 * and the remaining packets are just data.
     99 *
    100 * If encryption is enabled, each fragment payload size is reduced by enough
    101 * space to add the prefix and postfix (IV and ICV, totalling 8 bytes in the
    102 * case of WEP).  So 1500 bytes of payload with ieee->fts set to 500 takes
    103 * 3 frames without encryption but 4 frames with WEP, since the payload of each
    104 * frame is reduced to 492 bytes (see the worked sketch after this comment).
    105 *
    106 * SKB visualization
    107 *
    108 * ,- skb->data
    109 * |
    110 * |    ETHERNET HEADER        ,-<-- PAYLOAD
    111 * |                           |     14 bytes from skb->data
    112 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
    113 * |                       | | |
    114 * |,-Dest.--. ,--Src.---. | | |
    115 * |  6 bytes| | 6 bytes | | | |
    116 * v         | |         | | | |
    117 * 0         | v       1 | v | v           2
    118 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
    119 *     ^     | ^         | ^ |
    120 *     |     | |         | | |
    121 *     |     | |         | `T' <---- 2 bytes for Type
    122 *     |     | |         |
    123 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
    124 *     |     |
    125 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
    126 *
    127 *      SNAP HEADER
    128 *
    129 */
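
/*
 * Illustrative sketch, not part of the driver: the fragment-count arithmetic
 * described above and used by rtllib_xmit_inter() further down.  "bytes" is
 * the MSDU size (payload plus SNAP header) and "bytes_per_frag" the
 * per-fragment payload budget, i.e. the fragmentation threshold minus the
 * 802.11 header, optional FCS and any crypto prefix/postfix.  With the
 * simplified numbers above, 1500 bytes at 500 bytes per fragment gives
 * 3 fragments; at 492 bytes (WEP) it gives 4.
 */
static inline int rtllib_nr_fragments(int bytes, int bytes_per_frag)
{
	int nr_frags = bytes / bytes_per_frag;

	if (bytes % bytes_per_frag)	/* short final fragment */
		nr_frags++;
	return nr_frags;
}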
    130
    131static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
    132static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
    133
    134static int rtllib_put_snap(u8 *data, u16 h_proto)
    135{
    136	struct rtllib_snap_hdr *snap;
    137	u8 *oui;
    138
    139	snap = (struct rtllib_snap_hdr *)data;
    140	snap->dsap = 0xaa;
    141	snap->ssap = 0xaa;
    142	snap->ctrl = 0x03;
    143
    144	if (h_proto == 0x8137 || h_proto == 0x80f3)
    145		oui = P802_1H_OUI;
    146	else
    147		oui = RFC1042_OUI;
    148	snap->oui[0] = oui[0];
    149	snap->oui[1] = oui[1];
    150	snap->oui[2] = oui[2];
    151
    152	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
    153
    154	return SNAP_SIZE + sizeof(u16);
    155}
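
/*
 * Example, following directly from the code above: rtllib_put_snap(p, ETH_P_IP)
 * writes the RFC 1042 encapsulation AA AA 03 00 00 00 followed by the
 * EtherType 08 00 at p and returns 8 (SNAP_SIZE + sizeof(u16)); only IPX
 * (0x8137) and AppleTalk AARP (0x80f3) get the 802.1H OUI 00 00 f8 instead.
 */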
    156
    157int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
    158			    int hdr_len)
    159{
    160	struct lib80211_crypt_data *crypt = NULL;
    161	int res;
    162
    163	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
    164
    165	if (!(crypt && crypt->ops)) {
    166		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
    167			    __func__);
    168		return -1;
    169	}
    170	/* To encrypt, frame format is:
    171	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
    172	 */
    173
    174	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
    175	 * call both MSDU and MPDU encryption functions from here.
    176	 */
    177	atomic_inc(&crypt->refcnt);
    178	res = 0;
    179	if (crypt->ops->encrypt_msdu)
    180		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
    181	if (res == 0 && crypt->ops->encrypt_mpdu)
    182		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
    183
    184	atomic_dec(&crypt->refcnt);
    185	if (res < 0) {
    186		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
    187			    ieee->dev->name, frag->len);
    188		return -1;
    189	}
    190
    191	return 0;
    192}
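
/*
 * The caller is expected to leave the crypto ops room to work in place:
 * rtllib_xmit_inter() below subtracts the
 * crypt->ops->extra_{msdu,mpdu}_{prefix,postfix}_len values from each
 * fragment's payload budget and skb_reserve()s the prefix space before the
 * 802.11 header is copied in, so encrypt_msdu()/encrypt_mpdu() can move the
 * header forward, insert the IV between header and payload and append the
 * ICV without reallocating the skb.
 */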
    193
    194
    195void rtllib_txb_free(struct rtllib_txb *txb)
    196{
    197	if (unlikely(!txb))
    198		return;
    199	kfree(txb);
    200}
    201
    202static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
    203					   gfp_t gfp_mask)
    204{
    205	struct rtllib_txb *txb;
    206	int i;
    207
    208	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
    209		      gfp_mask);
    210	if (!txb)
    211		return NULL;
    212
    213	memset(txb, 0, sizeof(struct rtllib_txb));
    214	txb->nr_frags = nr_frags;
    215	txb->frag_size = cpu_to_le16(txb_size);
    216
    217	for (i = 0; i < nr_frags; i++) {
    218		txb->fragments[i] = dev_alloc_skb(txb_size);
    219		if (unlikely(!txb->fragments[i])) {
    220			i--;
    221			break;
    222		}
    223		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
    224	}
    225	if (unlikely(i != nr_frags)) {
    226		while (i >= 0)
    227			dev_kfree_skb_any(txb->fragments[i--]);
    228		kfree(txb);
    229		return NULL;
    230	}
    231	return txb;
    232}
    233
    234static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
    235{
    236	struct ethhdr *eth;
    237	struct iphdr *ip;
    238
    239	eth = (struct ethhdr *)skb->data;
    240	if (eth->h_proto != htons(ETH_P_IP))
    241		return 0;
    242
    243#ifdef VERBOSE_DEBUG
    244	print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
    245			     skb->len);
    246#endif
    247	ip = ip_hdr(skb);
    248	switch (ip->tos & 0xfc) {
    249	case 0x20:
    250		return 2;
    251	case 0x40:
    252		return 1;
    253	case 0x60:
    254		return 3;
    255	case 0x80:
    256		return 4;
    257	case 0xa0:
    258		return 5;
    259	case 0xc0:
    260		return 6;
    261	case 0xe0:
    262		return 7;
    263	default:
    264		return 0;
    265	}
    266}
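
/*
 * Only the class-selector style TOS values above map to a non-zero 802.11
 * user priority: e.g. TOS 0x20 (CS1) classifies as 2 and TOS 0xe0 (CS7) as 7,
 * while non-IP frames and any other DSCP (e.g. EF, TOS 0xb8) fall back to
 * best effort (0).
 */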
    267
    268static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
    269				    struct sk_buff *skb,
    270				    struct cb_desc *tcb_desc)
    271{
    272	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
    273	struct tx_ts_record *pTxTs = NULL;
    274	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;
    275
    276	if (rtllib_act_scanning(ieee, false))
    277		return;
    278
    279	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
    280		return;
    281	if (!IsQoSDataFrame(skb->data))
    282		return;
    283	if (is_multicast_ether_addr(hdr->addr1))
    284		return;
    285
    286	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
    287		return;
    288
    289	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
    290		return;
    291
    292	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
    293		return;
    294	if (pHTInfo->bCurrentAMPDUEnable) {
    295		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
    296		    skb->priority, TX_DIR, true)) {
    297			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
    298			return;
    299		}
    300		if (!pTxTs->TxAdmittedBARecord.b_valid) {
    301			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
    302			    KEY_TYPE_NA)) {
    303				;
    304			} else if (tcb_desc->bdhcp == 1) {
    305				;
    306			} else if (!pTxTs->bDisable_AddBa) {
    307				TsStartAddBaProcess(ieee, pTxTs);
    308			}
    309			goto FORCED_AGG_SETTING;
    310		} else if (!pTxTs->bUsingBa) {
    311			if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num,
    312			   (pTxTs->TxCurSeq+1)%4096))
    313				pTxTs->bUsingBa = true;
    314			else
    315				goto FORCED_AGG_SETTING;
    316		}
    317		if (ieee->iw_mode == IW_MODE_INFRA) {
    318			tcb_desc->bAMPDUEnable = true;
    319			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
    320			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
    321		}
    322	}
    323FORCED_AGG_SETTING:
    324	switch (pHTInfo->ForcedAMPDUMode) {
    325	case HT_AGG_AUTO:
    326		break;
    327
    328	case HT_AGG_FORCE_ENABLE:
    329		tcb_desc->bAMPDUEnable = true;
    330		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
    331		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
    332		break;
    333
    334	case HT_AGG_FORCE_DISABLE:
    335		tcb_desc->bAMPDUEnable = false;
    336		tcb_desc->ampdu_density = 0;
    337		tcb_desc->ampdu_factor = 0;
    338		break;
    339	}
    340}
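
/*
 * Net effect of the checks above: A-MPDU is only enabled for QoS unicast data
 * in infrastructure mode towards an HT peer, and only once a Block Ack
 * session has been admitted for the TID; otherwise TsStartAddBaProcess()
 * kicks off ADDBA negotiation and the frame goes out unaggregated, unless
 * pHTInfo->ForcedAMPDUMode forces aggregation on or off regardless.
 */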
    341
    342static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
    343					   struct cb_desc *tcb_desc)
    344{
    345	tcb_desc->bUseShortPreamble = false;
    346	if (tcb_desc->data_rate == 2)
    347		return;
    348	else if (ieee->current_network.capability &
    349		 WLAN_CAPABILITY_SHORT_PREAMBLE)
    350		tcb_desc->bUseShortPreamble = true;
    351}
    352
    353static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
    354				      struct cb_desc *tcb_desc)
    355{
    356	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
    357
    358	tcb_desc->bUseShortGI		= false;
    359
    360	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
    361		return;
    362
    363	if (pHTInfo->bForcedShortGI) {
    364		tcb_desc->bUseShortGI = true;
    365		return;
    366	}
    367
    368	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
    369		tcb_desc->bUseShortGI = true;
    370	else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
    371		tcb_desc->bUseShortGI = true;
    372}
    373
    374static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
    375				       struct cb_desc *tcb_desc)
    376{
    377	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
    378
    379	tcb_desc->bPacketBW = false;
    380
    381	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
    382		return;
    383
    384	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
    385		return;
    386
    387	if ((tcb_desc->data_rate & 0x80) == 0)
    388		return;
    389	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
    390	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
    391		tcb_desc->bPacketBW = true;
    392}
    393
    394static void rtllib_query_protectionmode(struct rtllib_device *ieee,
    395					struct cb_desc *tcb_desc,
    396					struct sk_buff *skb)
    397{
    398	struct rt_hi_throughput *pHTInfo;
    399
    400	tcb_desc->bRTSSTBC			= false;
    401	tcb_desc->bRTSUseShortGI		= false;
    402	tcb_desc->bCTSEnable			= false;
    403	tcb_desc->RTSSC				= 0;
    404	tcb_desc->bRTSBW			= false;
    405
    406	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
    407		return;
    408
    409	if (is_broadcast_ether_addr(skb->data+16))
    410		return;
    411
    412	if (ieee->mode < IEEE_N_24G) {
    413		if (skb->len > ieee->rts) {
    414			tcb_desc->bRTSEnable = true;
    415			tcb_desc->rts_rate = MGN_24M;
    416		} else if (ieee->current_network.buseprotection) {
    417			tcb_desc->bRTSEnable = true;
    418			tcb_desc->bCTSEnable = true;
    419			tcb_desc->rts_rate = MGN_24M;
    420		}
    421		return;
    422	}
    423
    424	pHTInfo = ieee->pHTInfo;
    425
    426	while (true) {
    427		if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
    428			tcb_desc->bCTSEnable	= true;
    429			tcb_desc->rts_rate  =	MGN_24M;
    430			tcb_desc->bRTSEnable = true;
    431			break;
    432		} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
    433			   HT_IOT_ACT_PURE_N_MODE)) {
    434			tcb_desc->bRTSEnable = true;
    435			tcb_desc->rts_rate  =	MGN_24M;
    436			break;
    437		}
    438		if (ieee->current_network.buseprotection) {
    439			tcb_desc->bRTSEnable = true;
    440			tcb_desc->bCTSEnable = true;
    441			tcb_desc->rts_rate = MGN_24M;
    442			break;
    443		}
    444		if (pHTInfo->bCurrentHTSupport  && pHTInfo->bEnableHT) {
    445			u8 HTOpMode = pHTInfo->CurrentOpMode;
    446
    447			if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
    448			     HTOpMode == 3)) ||
    449			     (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
    450				tcb_desc->rts_rate = MGN_24M;
    451				tcb_desc->bRTSEnable = true;
    452				break;
    453			}
    454		}
    455		if (skb->len > ieee->rts) {
    456			tcb_desc->rts_rate = MGN_24M;
    457			tcb_desc->bRTSEnable = true;
    458			break;
    459		}
    460		if (tcb_desc->bAMPDUEnable) {
    461			tcb_desc->rts_rate = MGN_24M;
    462			tcb_desc->bRTSEnable = false;
    463			break;
    464		}
    465		goto NO_PROTECTION;
    466	}
    467	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
    468		tcb_desc->bUseShortPreamble = true;
    469	if (ieee->iw_mode == IW_MODE_MASTER)
    470		goto NO_PROTECTION;
    471	return;
    472NO_PROTECTION:
    473	tcb_desc->bRTSEnable	= false;
    474	tcb_desc->bCTSEnable	= false;
    475	tcb_desc->rts_rate	= 0;
    476	tcb_desc->RTSSC		= 0;
    477	tcb_desc->bRTSBW	= false;
    478}
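
/*
 * The while (true) above is a single-pass decision ladder: the first rule
 * that matches (forced CTS-to-self, forced RTS / pure-N IOT action, the AP's
 * use-protection bit, HT operating mode 2 or 3, frame length above the RTS
 * threshold, or an A-MPDU frame) fills in the RTS/CTS fields and breaks out;
 * if none matches, the frame is sent unprotected via NO_PROTECTION.
 */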
    479
    480
    481static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
    482				     struct cb_desc *tcb_desc)
    483{
    484	if (ieee->bTxDisableRateFallBack)
    485		tcb_desc->bTxDisableRateFallBack = true;
    486
    487	if (ieee->bTxUseDriverAssingedRate)
    488		tcb_desc->bTxUseDriverAssingedRate = true;
    489	if (!tcb_desc->bTxDisableRateFallBack ||
    490	    !tcb_desc->bTxUseDriverAssingedRate) {
    491		if (ieee->iw_mode == IW_MODE_INFRA ||
    492		    ieee->iw_mode == IW_MODE_ADHOC)
    493			tcb_desc->RATRIndex = 0;
    494	}
    495}
    496
    497static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
    498			       u8 *dst)
    499{
    500	u16 seqnum = 0;
    501
    502	if (is_multicast_ether_addr(dst))
    503		return 0;
    504	if (IsQoSDataFrame(skb->data)) {
    505		struct tx_ts_record *pTS = NULL;
    506
    507		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
    508		    skb->priority, TX_DIR, true))
    509			return 0;
    510		seqnum = pTS->TxCurSeq;
    511		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
    512		return seqnum;
    513	}
    514	return 0;
    515}
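
/*
 * For QoS unicast data the sequence number comes from the per-TID TS record
 * and is advanced modulo 4096 here; multicast and non-QoS frames return 0,
 * and rtllib_xmit_inter() numbers those from the shared ieee->seq_ctrl[0]
 * counter instead.
 */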
    516
    517static int wme_downgrade_ac(struct sk_buff *skb)
    518{
    519	switch (skb->priority) {
    520	case 6:
    521	case 7:
    522		skb->priority = 5; /* VO -> VI */
    523		return 0;
    524	case 4:
    525	case 5:
    526		skb->priority = 3; /* VI -> BE */
    527		return 0;
    528	case 0:
    529	case 3:
    530		skb->priority = 1; /* BE -> BK */
    531		return 0;
    532	default:
    533		return -1;
    534	}
    535}
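
/*
 * Each call steps one rung down the WMM ladder (VO -> VI -> BE -> BK) and
 * returns -1 once the priority is already background; the loop in
 * rtllib_xmit_inter() below keeps downgrading while the AP's ACM bitmap
 * (ieee->wmm_acm) still blocks the frame's current access category.
 */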
    536
    537static u8 rtllib_current_rate(struct rtllib_device *ieee)
    538{
    539	if (ieee->mode & IEEE_MODE_MASK)
    540		return ieee->rate;
    541
    542	if (ieee->HTCurrentOperaRate)
    543		return ieee->HTCurrentOperaRate;
    544	else
    545		return ieee->rate & 0x7F;
    546}
    547
    548static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
    549{
    550	struct rtllib_device *ieee = (struct rtllib_device *)
    551				     netdev_priv_rsl(dev);
    552	struct rtllib_txb *txb = NULL;
    553	struct rtllib_hdr_3addrqos *frag_hdr;
    554	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
    555	unsigned long flags;
    556	struct net_device_stats *stats = &ieee->stats;
    557	int ether_type = 0, encrypt;
    558	int bytes, fc, qos_ctl = 0, hdr_len;
    559	struct sk_buff *skb_frag;
    560	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
    561		.duration_id = 0,
    562		.seq_ctl = 0,
    563		.qos_ctl = 0
    564	};
    565	int qos_activated = ieee->current_network.qos_data.active;
    566	u8 dest[ETH_ALEN];
    567	u8 src[ETH_ALEN];
    568	struct lib80211_crypt_data *crypt = NULL;
    569	struct cb_desc *tcb_desc;
    570	u8 bIsMulticast = false;
    571	u8 IsAmsdu = false;
    572	bool	bdhcp = false;
    573
    574	spin_lock_irqsave(&ieee->lock, flags);
    575
    576	/* If there is no driver handler to take the TXB, don't bother
    577	 * creating it...
    578	 */
    579	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
    580	   IEEE_SOFTMAC_TX_QUEUE)) ||
    581	   ((!ieee->softmac_data_hard_start_xmit &&
    582	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
    583		netdev_warn(ieee->dev, "No xmit handler.\n");
    584		goto success;
    585	}
    586
    587
    588	if (likely(ieee->raw_tx == 0)) {
    589		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
    590			netdev_warn(ieee->dev, "skb too small (%d).\n",
    591				    skb->len);
    592			goto success;
    593		}
    594		/* Save source and destination addresses */
    595		ether_addr_copy(dest, skb->data);
    596		ether_addr_copy(src, skb->data + ETH_ALEN);
    597
    598		memset(skb->cb, 0, sizeof(skb->cb));
    599		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
    600
    601		if (ieee->iw_mode == IW_MODE_MONITOR) {
    602			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
    603			if (unlikely(!txb)) {
    604				netdev_warn(ieee->dev,
    605					    "Could not allocate TXB\n");
    606				goto failed;
    607			}
    608
    609			txb->encrypted = 0;
    610			txb->payload_size = cpu_to_le16(skb->len);
    611			skb_put_data(txb->fragments[0], skb->data, skb->len);
    612
    613			goto success;
    614		}
    615
    616		if (skb->len > 282) {
    617			if (ether_type == ETH_P_IP) {
    618				const struct iphdr *ip = (struct iphdr *)
    619					((u8 *)skb->data+14);
    620				if (ip->protocol == IPPROTO_UDP) {
    621					struct udphdr *udp;
    622
    623					udp = (struct udphdr *)((u8 *)ip +
    624					      (ip->ihl << 2));
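					/* Byte 1/3 of the UDP header are the
					 * low-order bytes of the big-endian
					 * source and destination ports, so
					 * the checks below match 68->67 or
					 * 67->68 (BOOTP/DHCP) without looking
					 * at the high-order port bytes.
					 */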
    625					if (((((u8 *)udp)[1] == 68) &&
    626					   (((u8 *)udp)[3] == 67)) ||
    627					   ((((u8 *)udp)[1] == 67) &&
    628					   (((u8 *)udp)[3] == 68))) {
    629						bdhcp = true;
    630						ieee->LPSDelayCnt = 200;
    631					}
    632				}
    633			} else if (ether_type == ETH_P_ARP) {
    634				netdev_info(ieee->dev,
    635					    "=================>DHCP Protocol start tx ARP pkt!!\n");
    636				bdhcp = true;
    637				ieee->LPSDelayCnt =
    638					 ieee->current_network.tim.tim_count;
    639			}
    640		}
    641
    642		skb->priority = rtllib_classify(skb, IsAmsdu);
    643		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
    644		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
    645			ieee->host_encrypt && crypt && crypt->ops;
    646		if (!encrypt && ieee->ieee802_1x &&
    647		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
    648			stats->tx_dropped++;
    649			goto success;
    650		}
    651		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
    652			struct eapol *eap = (struct eapol *)(skb->data +
    653				sizeof(struct ethhdr) - SNAP_SIZE -
    654				sizeof(u16));
    655			netdev_dbg(ieee->dev,
    656				   "TX: IEEE 802.11 EAPOL frame: %s\n",
    657				   eap_get_type(eap->type));
    658		}
    659
    660		/* Advance the SKB to the start of the payload */
    661		skb_pull(skb, sizeof(struct ethhdr));
    662
    663		/* Determine total amount of storage required for TXB packets */
    664		bytes = skb->len + SNAP_SIZE + sizeof(u16);
    665
    666		if (encrypt)
    667			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
    668		else
    669			fc = RTLLIB_FTYPE_DATA;
    670
    671		if (qos_activated)
    672			fc |= RTLLIB_STYPE_QOS_DATA;
    673		else
    674			fc |= RTLLIB_STYPE_DATA;
    675
    676		if (ieee->iw_mode == IW_MODE_INFRA) {
    677			fc |= RTLLIB_FCTL_TODS;
    678			/* To DS: Addr1 = BSSID, Addr2 = SA,
    679			 * Addr3 = DA
    680			 */
    681			ether_addr_copy(header.addr1,
    682					ieee->current_network.bssid);
    683			ether_addr_copy(header.addr2, src);
    684			if (IsAmsdu)
    685				ether_addr_copy(header.addr3,
    686						ieee->current_network.bssid);
    687			else
    688				ether_addr_copy(header.addr3, dest);
    689		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
    690			/* not From/To DS: Addr1 = DA, Addr2 = SA,
    691			 * Addr3 = BSSID
    692			 */
    693			ether_addr_copy(header.addr1, dest);
    694			ether_addr_copy(header.addr2, src);
    695			ether_addr_copy(header.addr3,
    696					ieee->current_network.bssid);
    697		}
    698
    699		bIsMulticast = is_multicast_ether_addr(header.addr1);
    700
    701		header.frame_ctl = cpu_to_le16(fc);
    702
    703		/* Determine fragmentation size based on destination (multicast
    704		 * and broadcast are not fragmented)
    705		 */
    706		if (bIsMulticast) {
    707			frag_size = MAX_FRAG_THRESHOLD;
    708			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
    709		} else {
    710			frag_size = ieee->fts;
    711			qos_ctl = 0;
    712		}
    713
    714		if (qos_activated) {
    715			hdr_len = RTLLIB_3ADDR_LEN + 2;
    716
    717			/* In case we are a client, verify that ACM is not set for this AC */
    718			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
    719				netdev_info(ieee->dev, "skb->priority = %x\n",
    720						skb->priority);
    721				if (wme_downgrade_ac(skb))
    722					break;
    723				netdev_info(ieee->dev, "converted skb->priority = %x\n",
    724					   skb->priority);
    725			}
    726
    727			qos_ctl |= skb->priority;
    728			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
    729
    730		} else {
    731			hdr_len = RTLLIB_3ADDR_LEN;
    732		}
    733		/* Determine amount of payload per fragment.  Regardless of if
    734		 * this stack is providing the full 802.11 header, one will
    735		 * eventually be affixed to this fragment -- so we must account
    736		 * for it when determining the amount of payload space.
    737		 */
    738		bytes_per_frag = frag_size - hdr_len;
    739		if (ieee->config &
    740		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
    741			bytes_per_frag -= RTLLIB_FCS_LEN;
    742
    743		/* Each fragment may need to have room for encrypting
    744		 * pre/postfix
    745		 */
    746		if (encrypt) {
    747			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
    748				crypt->ops->extra_mpdu_postfix_len +
    749				crypt->ops->extra_msdu_prefix_len +
    750				crypt->ops->extra_msdu_postfix_len;
    751		}
    752		/* Number of fragments is the total bytes_per_frag /
    753		 * payload_per_fragment
    754		 */
    755		nr_frags = bytes / bytes_per_frag;
    756		bytes_last_frag = bytes % bytes_per_frag;
    757		if (bytes_last_frag)
    758			nr_frags++;
    759		else
    760			bytes_last_frag = bytes_per_frag;
    761
    762		/* When we allocate the TXB we allocate enough space for the
    763		 * reserve and full fragment bytes (bytes_per_frag doesn't
    764		 * include prefix, postfix, header, FCS, etc.)
    765		 */
    766		txb = rtllib_alloc_txb(nr_frags, frag_size +
    767				       ieee->tx_headroom, GFP_ATOMIC);
    768		if (unlikely(!txb)) {
    769			netdev_warn(ieee->dev, "Could not allocate TXB\n");
    770			goto failed;
    771		}
    772		txb->encrypted = encrypt;
    773		txb->payload_size = cpu_to_le16(bytes);
    774
    775		if (qos_activated)
    776			txb->queue_index = UP2AC(skb->priority);
    777		else
    778			txb->queue_index = WME_AC_BE;
    779
    780		for (i = 0; i < nr_frags; i++) {
    781			skb_frag = txb->fragments[i];
    782			tcb_desc = (struct cb_desc *)(skb_frag->cb +
    783				    MAX_DEV_ADDR_SIZE);
    784			if (qos_activated) {
    785				skb_frag->priority = skb->priority;
    786				tcb_desc->queue_index =  UP2AC(skb->priority);
    787			} else {
    788				skb_frag->priority = WME_AC_BE;
    789				tcb_desc->queue_index = WME_AC_BE;
    790			}
    791			skb_reserve(skb_frag, ieee->tx_headroom);
    792
    793			if (encrypt) {
    794				if (ieee->hwsec_active)
    795					tcb_desc->bHwSec = 1;
    796				else
    797					tcb_desc->bHwSec = 0;
    798				skb_reserve(skb_frag,
    799					    crypt->ops->extra_mpdu_prefix_len +
    800					    crypt->ops->extra_msdu_prefix_len);
    801			} else {
    802				tcb_desc->bHwSec = 0;
    803			}
    804			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
    805
    806			/* If this is not the last fragment, then add the
    807			 * MOREFRAGS bit to the frame control
    808			 */
    809			if (i != nr_frags - 1) {
    810				frag_hdr->frame_ctl = cpu_to_le16(
    811					fc | RTLLIB_FCTL_MOREFRAGS);
    812				bytes = bytes_per_frag;
    813
    814			} else {
    815				/* The last fragment has the remaining length */
    816				bytes = bytes_last_frag;
    817			}
    818			if ((qos_activated) && (!bIsMulticast)) {
    819				frag_hdr->seq_ctl =
    820					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
    821							     header.addr1));
    822				frag_hdr->seq_ctl =
    823					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
    824			} else {
    825				frag_hdr->seq_ctl =
    826					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
    827			}
    828			/* Put a SNAP header on the first fragment */
    829			if (i == 0) {
    830				rtllib_put_snap(
    831					skb_put(skb_frag, SNAP_SIZE +
    832					sizeof(u16)), ether_type);
    833				bytes -= SNAP_SIZE + sizeof(u16);
    834			}
    835
    836			skb_put_data(skb_frag, skb->data, bytes);
    837
    838			/* Advance the SKB... */
    839			skb_pull(skb, bytes);
    840
    841			/* Encryption routine will move the header forward in
    842			 * order to insert the IV between the header and the
    843			 * payload
    844			 */
    845			if (encrypt)
    846				rtllib_encrypt_fragment(ieee, skb_frag,
    847							hdr_len);
    848			if (ieee->config &
    849			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
    850				skb_put(skb_frag, 4);
    851		}
    852
    853		if ((qos_activated) && (!bIsMulticast)) {
    854			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
    855				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
    856			else
    857				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
    858		} else {
    859			if (ieee->seq_ctrl[0] == 0xFFF)
    860				ieee->seq_ctrl[0] = 0;
    861			else
    862				ieee->seq_ctrl[0]++;
    863		}
    864	} else {
    865		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
    866			netdev_warn(ieee->dev, "skb too small (%d).\n",
    867				    skb->len);
    868			goto success;
    869		}
    870
    871		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
    872		if (!txb) {
    873			netdev_warn(ieee->dev, "Could not allocate TXB\n");
    874			goto failed;
    875		}
    876
    877		txb->encrypted = 0;
    878		txb->payload_size = cpu_to_le16(skb->len);
    879		skb_put_data(txb->fragments[0], skb->data, skb->len);
    880	}
    881
    882 success:
    883	if (txb) {
    884		tcb_desc = (struct cb_desc *)
    885				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
    886		tcb_desc->bTxEnableFwCalcDur = 1;
    887		tcb_desc->priority = skb->priority;
    888
    889		if (ether_type == ETH_P_PAE) {
    890			if (ieee->pHTInfo->IOTAction &
    891			    HT_IOT_ACT_WA_IOT_Broadcom) {
    892				tcb_desc->data_rate =
    893					 MgntQuery_TxRateExcludeCCKRates(ieee);
    894				tcb_desc->bTxDisableRateFallBack = false;
    895			} else {
    896				tcb_desc->data_rate = ieee->basic_rate;
    897				tcb_desc->bTxDisableRateFallBack = 1;
    898			}
    899
    900
    901			tcb_desc->RATRIndex = 7;
    902			tcb_desc->bTxUseDriverAssingedRate = 1;
    903		} else {
    904			if (is_multicast_ether_addr(header.addr1))
    905				tcb_desc->bMulticast = 1;
    906			if (is_broadcast_ether_addr(header.addr1))
    907				tcb_desc->bBroadcast = 1;
    908			rtllib_txrate_selectmode(ieee, tcb_desc);
    909			if (tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
    910				tcb_desc->data_rate = ieee->basic_rate;
    911			else
    912				tcb_desc->data_rate = rtllib_current_rate(ieee);
    913
    914			if (bdhcp) {
    915				if (ieee->pHTInfo->IOTAction &
    916				    HT_IOT_ACT_WA_IOT_Broadcom) {
    917					tcb_desc->data_rate =
    918					   MgntQuery_TxRateExcludeCCKRates(ieee);
    919					tcb_desc->bTxDisableRateFallBack = false;
    920				} else {
    921					tcb_desc->data_rate = MGN_1M;
    922					tcb_desc->bTxDisableRateFallBack = 1;
    923				}
    924
    925
    926				tcb_desc->RATRIndex = 7;
    927				tcb_desc->bTxUseDriverAssingedRate = 1;
    928				tcb_desc->bdhcp = 1;
    929			}
    930
    931			rtllib_query_ShortPreambleMode(ieee, tcb_desc);
    932			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
    933						tcb_desc);
    934			rtllib_query_HTCapShortGI(ieee, tcb_desc);
    935			rtllib_query_BandwidthMode(ieee, tcb_desc);
    936			rtllib_query_protectionmode(ieee, tcb_desc,
    937						    txb->fragments[0]);
    938		}
    939	}
    940	spin_unlock_irqrestore(&ieee->lock, flags);
    941	dev_kfree_skb_any(skb);
    942	if (txb) {
    943		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
    944			dev->stats.tx_packets++;
    945			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
    946			rtllib_softmac_xmit(txb, ieee);
    947		} else {
    948			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
    949				stats->tx_packets++;
    950				stats->tx_bytes += le16_to_cpu(txb->payload_size);
    951				return 0;
    952			}
    953			rtllib_txb_free(txb);
    954		}
    955	}
    956
    957	return 0;
    958
    959 failed:
    960	spin_unlock_irqrestore(&ieee->lock, flags);
    961	netif_stop_queue(dev);
    962	stats->tx_errors++;
    963	return 1;
    964
    965}
    966
    967int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
    968{
    969	memset(skb->cb, 0, sizeof(skb->cb));
    970	return rtllib_xmit_inter(skb, dev);
    971}
    972EXPORT_SYMBOL(rtllib_xmit);