Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c         |  95
-rw-r--r--  net/core/dev.c              | 238
-rw-r--r--  net/core/neighbour.c        |  13
-rw-r--r--  net/core/netpoll.c          |   1
-rw-r--r--  net/core/pktgen.c           |  83
-rw-r--r--  net/core/request_sock.c     |   2
-rw-r--r--  net/core/skbuff.c           |  35
-rw-r--r--  net/core/sock.c             |   2
-rw-r--r--  net/core/stream.c           |   6
-rw-r--r--  net/core/sysctl_net_core.c  |   4
-rw-r--r--  net/core/user_dma.c         |   6
-rw-r--r--  net/core/utils.c            |   5
12 files changed, 343 insertions, 147 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8a28fc93b724..52f577a0f544 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -285,7 +285,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -315,7 +315,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
@@ -339,6 +339,93 @@ fault:
return -EFAULT;
}
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying to
+ * @from: io vector to copy to
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ struct iovec *from, int len)
+{
+ int start = skb_headlen(skb);
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+ if (memcpy_fromiovec(skb->data + offset, from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+
+ /* Copy paged appendix. Hmm... why does this look so complicated? */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ int err;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ err = memcpy_fromiovec(vaddr + frag->page_offset +
+ offset - start, from, copy);
+ kunmap(page);
+ if (err)
+ goto fault;
+
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list = list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_datagram_from_iovec(list,
+ offset - start,
+ from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
@@ -366,7 +453,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -402,7 +489,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
for (; list; list=list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
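[For orientation, a minimal sketch of how a caller might use the skb_copy_datagram_from_iovec() helper added above; the wrapper function, its name, and the msghdr-based call site are hypothetical and not part of this patch.]

#include <linux/skbuff.h>
#include <linux/socket.h>

/* Hypothetical caller sketch (not in this patch): fill a freshly
 * allocated skb from a sendmsg()-style iovec. Note that the iovec is
 * advanced by the copy, as the kernel-doc above points out. */
static int example_fill_skb(struct sk_buff *skb, struct msghdr *msg, int len)
{
	if (skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, len))
		return -EFAULT;	/* userspace fault while copying */
	return 0;
}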
diff --git a/net/core/dev.c b/net/core/dev.c
index cbc34c0db376..60c51f765887 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -261,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU(struct softnet_data, softnet_data);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
@@ -301,6 +301,7 @@ static const char *netdev_lock_name[] =
"_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
@@ -313,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
return ARRAY_SIZE(netdev_lock_type) - 1;
}
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
{
int i;
@@ -322,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
netdev_lock_name[i]);
}
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+ int i;
+
+ i = netdev_lock_pos(dev->type);
+ lockdep_set_class_and_name(&dev->addr_list_lock,
+ &netdev_addr_lock_key[i],
+ netdev_lock_name[i]);
+}
#else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
@@ -1325,22 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
}
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
{
- if (WARN_ON_ONCE(q == &noop_qdisc))
- return;
+ struct softnet_data *sd;
+ unsigned long flags;
- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
- struct softnet_data *sd;
- unsigned long flags;
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
@@ -1645,32 +1660,6 @@ out_kfree_skb:
return 0;
}
-/**
- * dev_queue_xmit - transmit a buffer
- * @skb: buffer to transmit
- *
- * Queue a buffer for transmission to a network device. The caller must
- * have set the device and priority and built the buffer before calling
- * this function. The function can be called from an interrupt.
- *
- * A negative errno code is returned on a failure. A success does not
- * guarantee the frame will be transmitted as it may be dropped due
- * to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- * I notice this method can also return errors from the queue disciplines,
- * including NET_XMIT_DROP, which is a positive value. So, errors can also
- * be positive.
- *
- * Regardless of the return value, the skb is consumed, so it is currently
- * difficult to retry a send to this method. (You can bump the ref count
- * before sending to hold a reference for retry if you are careful.)
- *
- * When calling this method, interrupts MUST be enabled. This is because
- * the BH enable code must have IRQs enabled so that it will not deadlock.
- * --BLG
- */
-
static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;
@@ -1738,6 +1727,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
return netdev_get_tx_queue(dev, queue_index);
}
+/**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+ *
+ * Queue a buffer for transmission to a network device. The caller must
+ * have set the device and priority and built the buffer before calling
+ * this function. The function can be called from an interrupt.
+ *
+ * A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ * I notice this method can also return errors from the queue disciplines,
+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
+ * be positive.
+ *
+ * Regardless of the return value, the skb is consumed, so it is currently
+ * difficult to retry a send to this method. (You can bump the ref count
+ * before sending to hold a reference for retry if you are careful.)
+ *
+ * When calling this method, interrupts MUST be enabled. This is because
+ * the BH enable code must have IRQs enabled so that it will not deadlock.
+ * --BLG
+ */
int dev_queue_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
@@ -1786,16 +1800,19 @@ gso:
skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
#endif
if (q->enqueue) {
- spinlock_t *root_lock = qdisc_root_lock(q);
+ spinlock_t *root_lock = qdisc_lock(q);
spin_lock(root_lock);
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
-
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
spin_unlock(root_lock);
- rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
goto out;
}
@@ -1899,7 +1916,6 @@ int netif_rx(struct sk_buff *skb)
if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
if (queue->input_pkt_queue.qlen) {
enqueue:
- dev_hold(skb->dev);
__skb_queue_tail(&queue->input_pkt_queue, skb);
local_irq_restore(flags);
return NET_RX_SUCCESS;
@@ -1931,22 +1947,6 @@ int netif_rx_ni(struct sk_buff *skb)
EXPORT_SYMBOL(netif_rx_ni);
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
-
- if (dev->master) {
- if (skb_bond_should_drop(skb)) {
- kfree_skb(skb);
- return NULL;
- }
- skb->dev = dev->master;
- }
-
- return dev;
-}
-
-
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1963,7 +1963,7 @@ static void net_tx_action(struct softirq_action *h)
struct sk_buff *skb = clist;
clist = clist->next;
- BUG_TRAP(!atomic_read(&skb->users));
+ WARN_ON(atomic_read(&skb->users));
__kfree_skb(skb);
}
}
@@ -1982,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
- smp_mb__before_clear_bit();
- clear_bit(__QDISC_STATE_SCHED, &q->state);
-
- root_lock = qdisc_root_lock(q);
+ root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
- __netif_schedule(q);
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))
+ __netif_reschedule(q);
}
}
}
@@ -2090,9 +2092,10 @@ static int ing_filter(struct sk_buff *skb)
rxq = &dev->rx_queue;
q = rxq->qdisc;
- if (q) {
+ if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
- result = qdisc_enqueue_root(skb, q);
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
@@ -2103,7 +2106,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- if (!skb->dev->rx_queue.qdisc)
+ if (skb->dev->rx_queue.qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
@@ -2173,6 +2176,7 @@ int netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
+ struct net_device *null_or_orig;
int ret = NET_RX_DROP;
__be16 type;
@@ -2186,10 +2190,14 @@ int netif_receive_skb(struct sk_buff *skb)
if (!skb->iif)
skb->iif = skb->dev->ifindex;
- orig_dev = skb_bond(skb);
-
- if (!orig_dev)
- return NET_RX_DROP;
+ null_or_orig = NULL;
+ orig_dev = skb->dev;
+ if (orig_dev->master) {
+ if (skb_bond_should_drop(skb))
+ null_or_orig = orig_dev; /* deliver only exact match */
+ else
+ skb->dev = orig_dev->master;
+ }
__get_cpu_var(netdev_rx_stat).total++;
@@ -2213,7 +2221,8 @@ int netif_receive_skb(struct sk_buff *skb)
#endif
list_for_each_entry_rcu(ptype, &ptype_all, list) {
- if (!ptype->dev || ptype->dev == skb->dev) {
+ if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -2238,7 +2247,8 @@ ncls:
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type &&
- (!ptype->dev || ptype->dev == skb->dev)) {
+ (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -2260,6 +2270,20 @@ out:
return ret;
}
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+ struct net_device *dev = arg;
+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &queue->input_pkt_queue);
+ kfree_skb(skb);
+ }
+}
+
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
@@ -2269,7 +2293,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi->weight = weight_p;
do {
struct sk_buff *skb;
- struct net_device *dev;
local_irq_disable();
skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2278,14 +2301,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
local_irq_enable();
break;
}
-
local_irq_enable();
- dev = skb->dev;
-
netif_receive_skb(skb);
-
- dev_put(dev);
} while (++work < quota && jiffies == start_time);
return work;
@@ -2385,7 +2403,7 @@ out:
*/
if (!cpus_empty(net_dma.channel_mask)) {
int chan_idx;
- for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+ for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
struct dma_chan *chan = net_dma.channels[chan_idx];
if (chan)
dma_async_memcpy_issue_pending(chan);
@@ -3837,7 +3855,7 @@ static void rollback_registered(struct net_device *dev)
dev->uninit(dev);
/* Notifier chain MUST detach us from master device. */
- BUG_TRAP(!dev->master);
+ WARN_ON(dev->master);
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
@@ -3852,7 +3870,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
void *_unused)
{
spin_lock_init(&dev_queue->_xmit_lock);
- netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+ netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
dev_queue->xmit_lock_owner = -1;
}
@@ -3897,6 +3915,7 @@ int register_netdevice(struct net_device *dev)
net = dev_net(dev);
spin_lock_init(&dev->addr_list_lock);
+ netdev_set_addr_lockdep_class(dev);
netdev_init_queue_locks(dev);
dev->iflink = -1;
@@ -3977,6 +3996,10 @@ int register_netdevice(struct net_device *dev)
}
}
+ /* Enable software GSO if SG is supported. */
+ if (dev->features & NETIF_F_SG)
+ dev->features |= NETIF_F_GSO;
+
netdev_initialize_kobject(dev);
ret = netdev_register_kobject(dev);
if (ret)
@@ -4154,13 +4177,15 @@ void netdev_run_todo(void)
dev->reg_state = NETREG_UNREGISTERED;
+ on_each_cpu(flush_backlog, dev, 1);
+
netdev_wait_allrefs(dev);
/* paranoia */
BUG_ON(atomic_read(&dev->refcnt));
- BUG_TRAP(!dev->ip_ptr);
- BUG_TRAP(!dev->ip6_ptr);
- BUG_TRAP(!dev->dn_ptr);
+ WARN_ON(dev->ip_ptr);
+ WARN_ON(dev->ip6_ptr);
+ WARN_ON(dev->dn_ptr);
if (dev->destructor)
dev->destructor(dev);
@@ -4189,6 +4214,7 @@ static void netdev_init_queues(struct net_device *dev)
{
netdev_init_one_queue(dev, &dev->rx_queue, NULL);
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+ spin_lock_init(&dev->tx_global_lock);
}
/**
@@ -4207,7 +4233,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
{
struct netdev_queue *tx;
struct net_device *dev;
- int alloc_size;
+ size_t alloc_size;
void *p;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -4227,7 +4253,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
return NULL;
}
- tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+ tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
if (!tx) {
printk(KERN_ERR "alloc_netdev: Unable to allocate "
"tx qdiscs.\n");
@@ -4519,7 +4545,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
i = 0;
cpu = first_cpu(cpu_online_map);
- for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+ for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
chan = net_dma->channels[chan_idx];
n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
@@ -4686,6 +4712,26 @@ err_name:
return -ENOMEM;
}
+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+ struct device_driver *driver;
+ struct device *parent;
+
+ if (len <= 0 || !buffer)
+ return buffer;
+ buffer[0] = 0;
+
+ parent = dev->dev.parent;
+
+ if (!parent)
+ return buffer;
+
+ driver = parent->driver;
+ if (driver && driver->name)
+ strlcpy(buffer, driver->name, len);
+ return buffer;
+}
+
static void __net_exit netdev_exit(struct net *net)
{
kfree(net->dev_name_head);
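[As a reading aid, a hedged sketch of how the new netdev_drivername() helper might be called, for example from a transmit-watchdog message; the surrounding function is illustrative and not part of this diff.]

#include <linux/netdevice.h>
#include <linux/kernel.h>

/* Illustrative caller (not in this patch): name the driver behind a
 * misbehaving device. netdev_drivername() copies the parent driver's
 * name into 'buf' and always returns 'buf', even if it stays empty. */
static void example_report_timeout(struct net_device *dev)
{
	char buf[64];

	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, buf, sizeof(buf)));
}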
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f62c8af85d38..9d92e41826e7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
struct neighbour *n = neigh_get_first(seq);
if (n) {
+ --(*pos);
while (*pos) {
n = neigh_get_next(seq, n, pos);
if (!n)
@@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
struct pneigh_entry *pn = pneigh_get_first(seq);
if (pn) {
+ --(*pos);
while (*pos) {
pn = pneigh_get_next(seq, pn, pos);
if (!pn)
@@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
void *rc;
+ loff_t idxpos = *pos;
- rc = neigh_get_idx(seq, pos);
+ rc = neigh_get_idx(seq, &idxpos);
if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
- rc = pneigh_get_idx(seq, pos);
+ rc = pneigh_get_idx(seq, &idxpos);
return rc;
}
@@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
__acquires(tbl->lock)
{
struct neigh_seq_state *state = seq->private;
- loff_t pos_minus_one;
state->tbl = tbl;
state->bucket = 0;
@@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
read_lock_bh(&tbl->lock);
- pos_minus_one = *pos - 1;
- return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
+ return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
@@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
void *rc;
if (v == SEQ_START_TOKEN) {
- rc = neigh_get_idx(seq, pos);
+ rc = neigh_get_first(seq);
goto out;
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
local_irq_save(flags);
__netif_tx_lock(txq, smp_processor_id());
if (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq) ||
dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -168,7 +168,7 @@
#include <asm/div64.h> /* do_div */
#include <asm/timex.h>
-#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n"
+#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
@@ -189,6 +189,7 @@
#define F_FLOW_SEQ (1<<11) /* Sequential flows */
#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
+#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
/* Thread control flag bits */
#define T_TERMINATE (1<<0)
@@ -621,6 +622,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_QUEUE_MAP_RND)
seq_printf(seq, "QUEUE_MAP_RND ");
+ if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+ seq_printf(seq, "QUEUE_MAP_CPU ");
+
if (pkt_dev->cflows) {
if (pkt_dev->flags & F_FLOW_SEQ)
seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
@@ -1134,6 +1138,12 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
pkt_dev->flags &= ~F_QUEUE_MAP_RND;
+
+ else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
+ pkt_dev->flags |= F_QUEUE_MAP_CPU;
+
+ else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
+ pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
#ifdef CONFIG_XFRM
else if (strcmp(f, "IPSEC") == 0)
pkt_dev->flags |= F_IPSEC_ON;
@@ -1895,6 +1905,23 @@ static int pktgen_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname)
+{
+ char b[IFNAMSIZ+5];
+ int i = 0;
+
+ for(i=0; ifname[i] != '@'; i++) {
+ if(i == IFNAMSIZ)
+ break;
+
+ b[i] = ifname[i];
+ }
+ b[i] = 0;
+
+ return dev_get_by_name(&init_net, b);
+}
+
+
/* Associate pktgen_dev with a device. */
static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
@@ -1908,7 +1935,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
pkt_dev->odev = NULL;
}
- odev = dev_get_by_name(&init_net, ifname);
+ odev = pktgen_dev_get_by_name(pkt_dev, ifname);
if (!odev) {
printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
return -ENODEV;
@@ -1934,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
*/
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
+ int ntxq;
+
if (!pkt_dev->odev) {
printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
"setup_inject.\n");
@@ -1942,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
return;
}
+ /* make sure that we don't pick a non-existing transmit queue */
+ ntxq = pkt_dev->odev->real_num_tx_queues;
+ if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+ printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+ "disabled because CPU count (%d) exceeds number ",
+ num_online_cpus());
+ printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+ "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+ pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+ }
+ if (ntxq <= pkt_dev->queue_map_min) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_min (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_min);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_min = ntxq - 1;
+ }
+ if (ntxq <= pkt_dev->queue_map_max) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_max (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_max);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_max = ntxq - 1;
+ }
+
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
@@ -2085,15 +2141,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
/* reset time */
pkt_dev->flows[flow].count = 0;
+ pkt_dev->flows[flow].flags = 0;
pkt_dev->curfl += 1;
if (pkt_dev->curfl >= pkt_dev->cflows)
pkt_dev->curfl = 0; /*reset */
}
} else {
flow = random32() % pkt_dev->cflows;
+ pkt_dev->curfl = flow;
- if (pkt_dev->flows[flow].count > pkt_dev->lflow)
+ if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
pkt_dev->flows[flow].count = 0;
+ pkt_dev->flows[flow].flags = 0;
+ }
}
return pkt_dev->curfl;
@@ -2125,7 +2185,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
#endif
static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
{
- if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+
+ if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+ pkt_dev->cur_queue_map = smp_processor_id();
+
+ else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
t = random32() %
@@ -2162,7 +2226,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
mc = random32() % pkt_dev->src_mac_count;
else {
mc = pkt_dev->cur_src_mac_offset++;
- if (pkt_dev->cur_src_mac_offset >
+ if (pkt_dev->cur_src_mac_offset >=
pkt_dev->src_mac_count)
pkt_dev->cur_src_mac_offset = 0;
}
@@ -2189,7 +2253,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
else {
mc = pkt_dev->cur_dst_mac_offset++;
- if (pkt_dev->cur_dst_mac_offset >
+ if (pkt_dev->cur_dst_mac_offset >=
pkt_dev->dst_mac_count) {
pkt_dev->cur_dst_mac_offset = 0;
}
@@ -3305,6 +3369,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
txq = netdev_get_tx_queue(odev, queue_map);
if (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq) ||
need_resched()) {
idle_start = getCurUs();
@@ -3320,7 +3385,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->idle_acc += getCurUs() - idle_start;
- if (netif_tx_queue_stopped(txq)) {
+ if (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq)) {
pkt_dev->next_tx_us = getCurUs(); /* TODO */
pkt_dev->next_tx_ns = 0;
goto out; /* Try the next interface */
@@ -3352,7 +3418,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq);
- if (!netif_tx_queue_stopped(txq)) {
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq)) {
atomic_inc(&(pkt_dev->skb->users));
retry_now:
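[A hedged user-space sketch of enabling the new QUEUE_MAP_CPU flag through pktgen's /proc interface; the helper and the example device path /proc/net/pktgen/eth0@0 are illustrative, not part of this patch.]

#include <stdio.h>

/* Illustrative only: write the "flag QUEUE_MAP_CPU" command to a pktgen
 * device file, e.g. /proc/net/pktgen/eth0@0. Returns 0 on success. */
static int pktgen_enable_queue_map_cpu(const char *dev_procfile)
{
	FILE *f = fopen(dev_procfile, "w");

	if (!f)
		return -1;
	fprintf(f, "flag QUEUE_MAP_CPU\n");
	return fclose(f);
}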
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 2d3035d3abd7..7552495aff7a 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -123,7 +123,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
}
}
- BUG_TRAP(lopt->qlen == 0);
+ WARN_ON(lopt->qlen != 0);
if (lopt_size > PAGE_SIZE)
vfree(lopt);
else
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e4115672b6cf..ca1ccdf1ef76 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -485,6 +485,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(head);
C(data);
C(truesize);
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+ C(do_not_encrypt);
+#endif
atomic_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -1200,7 +1203,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -1229,7 +1232,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
@@ -1475,7 +1478,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + frag->size;
if ((copy = end - offset) > 0) {
@@ -1503,7 +1506,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
@@ -1552,7 +1555,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -1581,7 +1584,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
@@ -1629,7 +1632,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -1662,7 +1665,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
__wsum csum2;
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
@@ -2253,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
segs = nskb;
tail = nskb;
- nskb->dev = skb->dev;
- skb_copy_queue_mapping(nskb, skb);
- nskb->priority = skb->priority;
- nskb->protocol = skb->protocol;
- nskb->vlan_tci = skb->vlan_tci;
- nskb->dst = dst_clone(skb->dst);
- memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
- nskb->pkt_type = skb->pkt_type;
+ __copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
skb_reserve(nskb, headroom);
@@ -2271,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
doffset);
if (!sg) {
+ nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
len, 0);
@@ -2280,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
frag = skb_shinfo(nskb)->frags;
k = 0;
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum = skb->csum;
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);
@@ -2373,7 +2368,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
@@ -2397,7 +2392,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 10a64d57078c..91f8bbc93526 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -180,7 +180,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
"clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
"clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
- "clock-27" , "clock-28" , "clock-29" ,
+ "clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_MAX"
};
diff --git a/net/core/stream.c b/net/core/stream.c
index 4a0ad152c9c4..a6b3437ff082 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -192,13 +192,13 @@ void sk_stream_kill_queues(struct sock *sk)
__skb_queue_purge(&sk->sk_error_queue);
/* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+ WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
sk_mem_reclaim(sk);
- BUG_TRAP(!sk->sk_wmem_queued);
- BUG_TRAP(!sk->sk_forward_alloc);
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a570e2af22cb..f686467ff12b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -67,7 +67,7 @@ static struct ctl_table net_core_table[] = {
{
.ctl_name = NET_CORE_MSG_COST,
.procname = "message_cost",
- .data = &net_msg_cost,
+ .data = &net_ratelimit_state.interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
@@ -76,7 +76,7 @@ static struct ctl_table net_core_table[] = {
{
.ctl_name = NET_CORE_MSG_BURST,
.procname = "message_burst",
- .data = &net_msg_burst,
+ .data = &net_ratelimit_state.burst,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..164b090d5ac3 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -27,13 +27,13 @@
#include <linux/dmaengine.h>
#include <linux/socket.h>
-#include <linux/rtnetlink.h> /* for BUG_TRAP */
#include <net/tcp.h>
#include <net/netdma.h>
#define NET_DMA_DEFAULT_COPYBREAK 4096
int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
+EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
/**
* dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
@@ -71,7 +71,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
copy = end - offset;
@@ -100,7 +100,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
copy = end - offset;
diff --git a/net/core/utils.c b/net/core/utils.c
index 8031eb59054e..72e0ebe964a0 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -31,17 +31,16 @@
#include <asm/system.h>
#include <asm/uaccess.h>
-int net_msg_cost __read_mostly = 5*HZ;
-int net_msg_burst __read_mostly = 10;
int net_msg_warn __read_mostly = 1;
EXPORT_SYMBOL(net_msg_warn);
+DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10);
/*
* All net warning printk()s should be guarded by this function.
*/
int net_ratelimit(void)
{
- return __printk_ratelimit(net_msg_cost, net_msg_burst);
+ return __ratelimit(&net_ratelimit_state);
}
EXPORT_SYMBOL(net_ratelimit);
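[Finally, a minimal sketch of the caller-side pattern that the ratelimit conversion leaves unchanged; the warning message is illustrative only.]

#include <linux/kernel.h>
#include <linux/net.h>

/* Illustrative guarded printk: callers keep using net_ratelimit(), which
 * is now backed by net_ratelimit_state (interval 5*HZ, burst 10) and
 * still tunable via the message_cost / message_burst sysctls above. */
static void example_warn_bad_packet(void)
{
	if (net_ratelimit())
		printk(KERN_WARNING "example: dropping malformed packet\n");
}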