cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rx.c (16816B)


/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
				       const struct sk_buff *skb)
{
	unsigned int needed = 0;

	if (skb) {
		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
		if (skb_is_gso(skb))
			needed++;
		if (skb->sw_hash)
			needed++;
	}

	WRITE_ONCE(queue->rx_slots_needed, needed);
}

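/* Check whether the frontend has posted enough request slots for the
 * skb at the head of the internal queue, as recorded in rx_slots_needed.
 * If not, arm req_event so the frontend notifies us once it posts more
 * requests.
 */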
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	unsigned int needed;

	needed = READ_ONCE(queue->rx_slots_needed);
	if (!needed)
		return false;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

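/* Queue an skb for delivery to the frontend.  If the internal queue
 * already holds rx_queue_max bytes the skb is dropped and the matching
 * netdev tx queue is stopped; otherwise it is appended and, when it
 * becomes the new head, rx_slots_needed is updated for it.
 */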
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	if (queue->rx_queue_len >= queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

		__skb_queue_tail(&queue->rx_queue, skb);

		queue->rx_queue_len += skb->len;
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

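/* Remove and return the skb at the head of the internal queue, or NULL
 * if the queue is empty.  The needed-slot count is recalculated for the
 * new head, and the netdev tx queue is woken once the backlog drops
 * below rx_queue_max.
 */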
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
	}
}

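/* Issue the batched grant-copy operations accumulated in rx_copy, patch
 * the status of any failed copy into its response, push the responses
 * to the frontend (notifying it via the rx irq if required) and free
 * the skbs whose transmission is now complete.
 */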
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

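/* Add one grant-copy operation to the batch, copying len bytes of
 * backend data into the guest page granted by req at the given offset.
 * The source may itself be a foreign page (e.g. a packet forwarded from
 * another domain), in which case its grant reference is used instead of
 * a local frame number.  A full batch is flushed before adding.
 */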
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid  = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref    = req->gref;
	op->dest.domid    = queue->vif->domid;
	op->dest.offset   = offset;
	op->len           = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

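/* State tracked while a single skb is transformed into ring slots: the
 * current fragment (frag == -1 means the linear head of frag_iter), the
 * offset within that fragment, any extra-info segments still to be
 * sent, and the number of slots consumed so far.
 */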
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

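/* Dequeue the next skb and initialise the packet state for it.  GSO,
 * XDP headroom and hash extra-info segments are prepared here so they
 * can be emitted ahead of the remaining data slots.
 */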
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (queue->vif->xdp_headroom) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

		memset(extra, 0, sizeof(struct xen_netif_extra_info));
		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}

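/* Return the next contiguous chunk of the current fragment.  The chunk
 * is limited both by the space left in the destination ring page
 * (XEN_PAGE_SIZE - offset) and by the distance to the next page
 * boundary in the source data, so a single grant copy never crosses a
 * page on either side.  Once the fragment is exhausted, advance to the
 * next one.
 */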
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}

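/* Fill one data slot: queue grant copies for as many chunks as fit into
 * a single ring page and write the matching response.  The first slot
 * of a packet also carries the checksum and extra-info flags; more_data
 * is set while further slots are still needed.
 */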
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = queue->vif->xdp_headroom;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

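/* Emit one pending extra-info segment into the current slot, reusing
 * the response as an extra-info structure.  XEN_NETIF_EXTRA_FLAG_MORE
 * is set while further extras remain to be sent.
 */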
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

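/* Transmit a single skb to the frontend: consume one request per slot,
 * emitting the first data slot, then any extra-info slots, then the
 * remaining data slots, until the whole packet has been described.
 */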
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

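/* Main receive work loop, run from the per-queue kthread: transmit up
 * to RX_BATCH_SIZE queued skbs while the frontend has posted enough
 * request slots, then flush the grant copies and push all responses.
 */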
#define RX_BATCH_SIZE 64

void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return prod - cons;
}

static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return !queue->stalled &&
		xenvif_rx_queue_slots(queue) < needed &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

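/* The wake-up condition for the rx kthread: ring slots to consume, a
 * stall/ready transition to act on, a pending stop request, or a
 * disabled (rogue) frontend.
 */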
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		(test_kthread && kthread_should_stop()) ||
		queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue, true))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;
		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
					&queue->eoi_pending) &
		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
			xen_irq_lateeoi(queue->rx_irq, 0);

		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

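/* Per-queue kernel thread: wait for work, transmit queued packets to
 * the frontend, track stall/ready transitions for carrier handling and
 * drop packets that have sat in the queue past their expiry.
 */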
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}