cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uap_txrx.c (16481B)


/*
 * NXP Wireless LAN device driver: AP TX and RX data handling
 *
 * Copyright 2011-2020 NXP
 *
 * This software file (the "File") is distributed by NXP
 * under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "main.h"
#include "wmm.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function walks the RA lists on ra_list_head and deletes bridged
 * packets from them until the number of pending bridged packets drops to
 * the low threshold. It returns true if at least one packet was deleted,
 * false otherwise.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
			/* Stop once the backlog of bridged packets has
			 * drained below the low-water mark.
			 */
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
					     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}

/* This function deletes packets from a particular RA list. The index of the
 * RA list from which packets were deleted is preserved so that a subsequent
 * call continues with the next RA list, maintaining fairness across TIDs.
 */
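/* For illustration: with MAX_NUM_TID == 8 and del_list_idx == 5, the scan
 * below visits TIDs 5, 6, 7, 0, ... As soon as one list yields deletions,
 * del_list_idx is advanced past it and the loop stops, so the next call
 * resumes at the following TID instead of restarting from 0.
 */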
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	struct list_head *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;
		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		/* Pass the TID of the list actually being pruned so that
		 * the pkts_paused accounting stays consistent.
		 */
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list,
						      priv->del_list_idx)) {
			priv->del_list_idx++;
			break;
		}
	}

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct mwifiex_sta_node *src_node;
	int index;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
					     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		mwifiex_uap_cleanup_tx_queues(priv);
		return;
	}

	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
		     sizeof(bridge_tunnel_header))) ||
	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
		     sizeof(rfc1042_header)) &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
		/* Replace the 802.3 header and rfc1042 header (llc/snap)
		 * with an Ethernet II header, keeping the src/dst addresses
		 * and the snap_type (ethertype).
		 *
		 * The firmware only passes up SNAP frames, converting all RX
		 * data from 802.11 to 802.2/LLC/SNAP frames.
		 *
		 * To create the Ethernet II header, just move the src and
		 * dst addresses right before the snap_type.
		 */
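		/* Buffer layout before and after the rewrite (sizes in
		 * bytes; the 802.3 length field and the LLC/OUI bytes are
		 * discarded):
		 *
		 *   in:  | dst 6 | src 6 | len 2 | llc 3 | oui 3 | type 2 |
		 *   out:                 | dst 6 | src 6 | type 2 |
		 *
		 * h_source is copied before h_dest: the new header overlaps
		 * the old source address bytes, so copying in the other
		 * order would clobber the source address before it is read.
		 */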
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		/* Chop off the rxpd plus the excess bytes of the
		 * 802.2/llc/snap header that was removed.
		 */
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}

	/* Chop off the leading header bytes so that skb->data points to
	 * the start of either the reconstructed Ethernet II frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "data: Tx: insufficient skb headroom %d\n",
			    skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			mwifiex_dbg(priv->adapter, ERROR,
				    "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		mwifiex_dbg(priv->adapter, INFO,
			    "info: new skb headroom %d\n",
			    skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
	}

	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
		/* Update bridge packet statistics as the
		 * packet is not going to kernel/upper layer.
		 */
		priv->stats.rx_bytes += skb->len;
		priv->stats.rx_packets++;

		/* Sending bridge packet to TX queue, so save the packet
		 * length in TXCB to update statistics in TX complete.
		 */
		tx_info->pkt_len = skb->len;
	}

	__net_timestamp(skb);

	index = mwifiex_1d_to_wmm_queue[skb->priority];
	atomic_inc(&priv->wmm_tx_pending[index]);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	mwifiex_queue_main_work(priv->adapter);
}

/*
 * This function contains the logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
 * as well as queued back to the AP TX queue so that it can be sent to the
 * other associated stations.
 * If a packet is unicast and the RA is present in the associated station
 * list, it is requeued into the AP TX queue.
 * If a packet is unicast and the RA is not in the associated station list,
 * the packet is forwarded to the kernel to handle the routing logic.
 */
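/* skb ownership in the three cases, as implemented below: for multicast, a
 * copy is queued for bridging and the original skb still goes up the stack;
 * for unicast to a known STA, the original skb itself is requeued and
 * nothing goes up; otherwise the original skb is handed to
 * mwifiex_process_rx_packet().
 */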
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ra[ETH_ALEN];
	struct sk_buff *skb_uap;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* don't do packet forwarding in disconnected state */
	if (!priv->media_connected) {
		mwifiex_dbg(adapter, ERROR,
			    "drop packet in disconnected state.\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

	if (is_multicast_ether_addr(ra)) {
		/* skb_copy() can fail under memory pressure; only queue the
		 * bridged copy when the allocation succeeded.
		 */
		skb_uap = skb_copy(skb, GFP_ATOMIC);
		if (likely(skb_uap))
			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
	} else {
		if (mwifiex_get_sta_entry(priv, ra)) {
			/* Requeue Intra-BSS packet */
			mwifiex_uap_queue_bridged_pkt(priv, skb);
			return 0;
		}
	}

	/* Forward unicast/Inter-BSS packets to the kernel. */
	return mwifiex_process_rx_packet(priv, skb);
}

int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *src_node;
	struct ethhdr *p_ethhdr;
	struct sk_buff *skb_uap;
	struct mwifiex_txinfo *tx_info;

	if (!skb)
		return -1;

	p_ethhdr = (void *)skb->data;
	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
	}

	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
			skb_uap =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		else
			skb_uap = skb_copy(skb, GFP_ATOMIC);

		if (likely(skb_uap)) {
			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
			memset(tx_info, 0, sizeof(*tx_info));
			tx_info->bss_num = priv->bss_num;
			tx_info->bss_type = priv->bss_type;
			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
			__net_timestamp(skb_uap);
			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
			atomic_inc(&adapter->tx_pending);
			atomic_inc(&adapter->pending_bridged_pkts);
			if ((atomic_read(&adapter->pending_bridged_pkts) >=
					MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
				mwifiex_dbg(adapter, ERROR,
					    "Tx: Bridge packet limit reached. Drop packet!\n");
				mwifiex_uap_cleanup_tx_queues(priv);
			}

		} else {
			mwifiex_dbg(adapter, ERROR,
				    "failed to allocate skb_uap\n");
		}

		mwifiex_queue_main_work(adapter);
		/* Don't forward Intra-BSS unicast packets to the upper layer */
		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
			return 0;
	}

	skb->dev = priv->netdev;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	skb->ip_summed = CHECKSUM_NONE;

	/* This adjustment is required only for 11n-capable USB/PCIE
	 * interfaces: there we allocate a 4K buffer for every RX packet so
	 * that 4K A-MSDUs can be received, while on SD the buffers are
	 * sized to the packet and no adjustment is needed.
	 *
	 * Each skb is therefore backed by a 4K allocation even though
	 * typical packets carry only about 2K of data, which inflates
	 * skb->truesize. If an application sizes its receive buffer to the
	 * data it expects (say 64K for a 64K UDP datagram arriving as
	 * ~1.5K IP fragments), the cumulative truesize of the 4K-backed
	 * fragments exceeds the socket buffer limit long before 64K of
	 * payload has arrived, and the kernel drops the remaining
	 * fragments. This used to fail the Filesndl-ht.scr script for UDP,
	 * hence this fix.
	 */
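	/* Rough numbers for illustration: a 64K datagram arrives as about
	 * 44 fragments of ~1500 bytes. At 4K truesize each, that accounts
	 * for roughly 44 * 4K = 176K against the socket buffer, so a 64K
	 * SO_RCVBUF overflows after about a third of the fragments. The
	 * adjustment below shrinks truesize toward the actual data length
	 * (for a packet smaller than the buffer, skb->len -
	 * MWIFIEX_RX_DATA_BUF_SIZE is negative).
	 */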
	if ((adapter->iface_type == MWIFIEX_USB ||
	     adapter->iface_type == MWIFIEX_PCIE) &&
	    skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);

	/* Forward multicast/broadcast packets to the upper layer */
	netif_rx(skb);
	return 0;
}

/*
 * This function processes a packet received on the AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it's a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. The skb is then passed to the AP packet
 * forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->stats.tx_failed++;

		dev_kfree_skb_any(skb);
		return 0;
	}

	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		spin_lock_bh(&priv->sta_list_spinlock);
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}

/*
 * This function fills the TxPD for AP TX packets.
 *
 * The TX buffer received by this function should already have the
 * header space allocated for the TxPD.
 *
 * This function inserts the TxPD between the interface header and the
 * actual data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required -
 *      - BSS number
 *      - Tx packet length and offset
 *      - Priority
 *      - Packet delay
 *      - Priority specific Tx control
 *      - Flags
 */
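/* Resulting buffer layout (illustrative), with pad chosen so that the final
 * skb->data is MWIFIEX_DMA_ALIGN_SZ-aligned:
 *
 *   skb->data -> | intf hdr | uap_txpd | pad | (mgmt hdr) | payload |
 *
 * tx_pkt_offset counts from the start of the TxPD to the payload, i.e.
 * sizeof(*txpd) + pad (+ MWIFIEX_MGMT_FRAME_HEADER_SIZE for mgmt frames).
 */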
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad;
	u16 pkt_type, pkt_offset;
	int hroom = adapter->intf_hdr_len;

	if (!skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

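	/* Choose pad so that after pushing hroom + sizeof(*txpd) + pad
	 * bytes, skb->data ends up MWIFIEX_DMA_ALIGN_SZ-aligned. For
	 * illustration, with an alignment of 8, data at ...0x0f and
	 * hroom + sizeof(*txpd) totalling 0x44 bytes:
	 * pad = (0x0f - 0x44) & 7 = 3, and 0x0f - 3 - 0x44 = ...0xc8,
	 * which is 8-byte aligned.
	 */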
	pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) &
	       (MWIFIEX_DMA_ALIGN_SZ - 1);

	skb_push(skb, sizeof(*txpd) + pad);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
						pad)));
	txpd->priority = (u8)skb->priority;

	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
	    tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
		txpd->tx_token_id = tx_info->ack_frame_id;
		txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
	}

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; setting it to
		 * 0 causes the default value to be used later in this
		 * function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	pkt_offset = sizeof(*txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add header for management frame */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for adapter->intf_hdr_len */
	skb_push(skb, hroom);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}