| author | David S. Miller <davem@davemloft.net> | 2014-09-01 17:40:01 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-09-01 17:40:01 -0700 |
| commit | 53fda7f7f9e88fa9015b2cdabebfaf5477a28241 (patch) | |
| tree | 595d3af7a5ffab41fbf2b77dbbb3c3787f5700bb /include/linux | |
| parent | dace1b54726bffe1c009f7661e3cee6b762f30c8 (diff) | |
| parent | 8dcda22a5d0abaf347b21b057655f3809b91639d (diff) | |
Merge branch 'xmit_list'
David Miller says:
====================
net: Make dev_hard_start_xmit() work fundamentally on lists
After this patch set, dev_hard_start_xmit() will work fundamentally on
any and all SKB lists.
This opens the path for a clean implementation of pulling multiple
packets out during qdisc_restart(), and then passing that blob in one
shot to dev_hard_start_xmit().
There were two main architectural blockers to this:
1) The GSO handling: we kept the original GSO head SKB around simply
   because dev_hard_start_xmit() had no way to communicate to the
   caller how far into the segmented list it was able to go. Now it
   can, so the GSO head can be liberated immediately.

   All of the special GSO head SKB destructor handling and the like
   goes away too.
2) Validation of VLAN, CSUM, and segmentation characteristics was
   being performed inside of dev_hard_start_xmit(). If we want to
   truly batch, we have to let the higher levels do this. In
   particular, this is now dequeue_skb()'s job.
And with those two issues out of the way, it should now be trivial to
build experiments on top of this patch set; all of the framework
should be there now. You could do something as simple as:
	skb = q->dequeue(q);
	if (skb)
		skb = validate_xmit_skb(skb, qdisc_dev(q));
	if (skb) {
		struct sk_buff *new, *head = skb;
		int limit = 5;

		do {
			new = q->dequeue(q);
			if (new)
				new = validate_xmit_skb(new, qdisc_dev(q));
			if (new) {
				skb->next = new;
				skb = new;
			}
		} while (new && --limit);
		skb = head;
	}
inside of the else branch of dequeue_skb().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
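For context beyond the commit message, here is a minimal caller-side sketch (not part of the patch set) of where such a batch would then go. It assumes the caller already holds the device's transmit queue lock, and it relies only on the reworked dev_hard_start_xmit() prototype shown in the diff below, which returns the first skb it could not send and reports the driver's last return code through ret:

	#include <linux/netdevice.h>

	/* Sketch only (not from the patch set): push a validated skb chain
	 * to the device in one shot.  dev_hard_start_xmit() now walks the
	 * list and returns the first skb it could not hand to the driver
	 * (NULL if everything was sent), with the driver's last return code
	 * in *ret.  The caller is expected to requeue whatever comes back.
	 */
	static struct sk_buff *xmit_batch(struct sk_buff *skb,
					  struct net_device *dev,
					  struct netdev_queue *txq, int *ret)
	{
		*ret = NETDEV_TX_OK;

		if (netif_xmit_frozen_or_stopped(txq))
			return skb;	/* nothing sent, requeue it all */

		/* Everything up to the returned skb has been consumed; a GSO
		 * head at the front of the chain is freed as it goes, so only
		 * the unsent tail (if any) has to go back to the qdisc.
		 */
		return dev_hard_start_xmit(skb, dev, txq, ret);
	}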
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/netdevice.h | 20 |
1 files changed, 14 insertions, 6 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 456eb1fe51e8..202c25a9aadf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2827,8 +2827,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
 int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
 			 struct netdev_phys_port_id *ppid);
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq);
+struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+				    struct netdev_queue *txq, int *ret);
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@ -3431,17 +3432,24 @@ int __init dev_proc_init(void);
 #endif
 
 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
-					      struct sk_buff *skb, struct net_device *dev)
+					      struct sk_buff *skb, struct net_device *dev,
+					      bool more)
 {
-	skb->xmit_more = 0;
+	skb->xmit_more = more ? 1 : 0;
 	return ops->ndo_start_xmit(skb, dev);
 }
 
-static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+					    struct netdev_queue *txq, bool more)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
+	int rc;
 
-	return __netdev_start_xmit(ops, skb, dev);
+	rc = __netdev_start_xmit(ops, skb, dev, more);
+	if (rc == NETDEV_TX_OK)
+		txq_trans_update(txq);
+
+	return rc;
 }
 
 int netdev_class_create_file_ns(struct class_attribute *class_attr,
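Built on the reworked helpers in this hunk, a per-list transmit step can tell the driver, through the new more argument (which lands in skb->xmit_more), that another packet follows immediately, so the TX doorbell can be deferred. The sketch below is illustrative only; the function name and exact flow are assumptions, not code from this patch set:

	#include <linux/netdevice.h>

	/* Illustrative only: unlink each skb and hand it to the driver via
	 * the new netdev_start_xmit(), passing more = true while another
	 * packet from the same list still follows.  Returns the first
	 * unsent skb (NULL if the whole list went out) and the last driver
	 * return code through *ret.
	 */
	static struct sk_buff *xmit_list_sketch(struct sk_buff *skb,
						struct net_device *dev,
						struct netdev_queue *txq,
						int *ret)
	{
		netdev_tx_t rc = NETDEV_TX_OK;

		while (skb) {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			rc = netdev_start_xmit(skb, dev, txq, next != NULL);
			if (rc != NETDEV_TX_OK) {
				skb->next = next;	/* driver did not consume it */
				break;
			}
			skb = next;
		}
		*ret = rc;
		return skb;
	}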
