author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-04 04:10:39 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-04 04:10:39 +0400
commit     c42f1d4b523950c4af060f8fc0c7016755d8a3bc (patch)
tree       8322c9c14b0c7055ce924d12c1150d2acb0ea95e  /net
parent     913a90416918a591e6d5ece036b795c58a08131d (diff)
parent     5a8887d39e1ba5ee2d4ccb94b14d6f2dce5ddfca (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Transfer padding was wrong for full-speed USB in ASIX driver, fix from Ingo van Lil.

 2) Propagate the negative packet offset fix into the PowerPC BPF JIT. From Jan Seiffert.

 3) dl2k driver's private ioctls were letting unprivileged tasks make MII writes and other ugly bits like that. Fix from Jeff Mahoney.

 4) Fix TX VLAN and RX packet drops in ucc_geth, from Joakim Tjernlund.

 5) OOPS and network namespace fixes in IPVS from Hans Schillstrom and Julian Anastasov.

 6) Fix races and sleeping in locked context bugs in drop_monitor, from Neil Horman.

 7) Fix link status indication in smsc95xx driver, from Paolo Pisati.

 8) Fix bridge netfilter OOPS, from Peter Huang.

 9) L2TP sendmsg can return on error conditions with the socket lock held, oops. Fix from Sasha Levin.

10) udp_diag should return meaningful values for socket memory usage, from Shan Wei.

11) Eric Dumazet is so awesome he gets his own section:

    Socket memory cgroup code (I never should have applied those patches, grumble...) made erroneous changes to sk_sockets_allocated_read_positive(). It was changed to use percpu_counter_sum_positive (which requires BH disabling) instead of percpu_counter_read_positive (which does not). Revert back to avoid crashes and lockdep warnings.

    Adjust the default tcp_adv_win_scale and tcp_rmem[2] values to fix throughput regressions. This is necessary as a result of our more precise skb->truesize tracking.

    Fix SKB leak in netem packet scheduler.

12) New device IDs for various bluetooth devices, from Manoj Iyer, AceLan Kao, and Steven Harms.

13) Fix command completion race in ipw2200, from Stanislav Yakovlev.

14) Fix rtlwifi oops on unload, from Larry Finger.

15) Fix hard_mtu when adjusting hard_header_len in smsc95xx driver. From Stephane Fillod.

16) ehea driver registers its IRQ before all the necessary state is set up, resulting in crashes. Fix from Thadeu Lima de Souza Cascardo.

17) Fix PHY connection failures in davinci_emac driver, from Anatolij Gustschin.

18) Missing break; in switch statement in bluetooth's hci_cmd_complete_evt(). Fix from Szymon Janc.

19) Fix queue programming in iwlwifi, from Johannes Berg.

20) Interrupt throttling defaults not being actually programmed into the hardware, fix from Jeff Kirsher and Ying Cai.

21) TLAN driver SKB encoding in descriptor busted on 64-bit, fix from Benjamin Poirier.

22) Fix blind status block RX producer pointer deref in TG3 driver, from Matt Carlson.

23) Promisc and multicast are busted on ehea, fixes from Thadeu Lima de Souza Cascardo.

24) Fix crashes in 6lowpan, from Alexander Smirnov.

25) tcp_complete_cwr() needs to be careful to not rewind the CWND to ssthresh if ssthresh has the "infinite" value. Fix from Yuchung Cheng.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (81 commits)
  sungem: Fix WakeOnLan
  tcp: change tcp_adv_win_scale and tcp_rmem[2]
  net: l2tp: unlock socket lock before returning from l2tp_ip_sendmsg
  drop_monitor: prevent init path from scheduling on the wrong cpu
  usbnet: fix failure handling in usbnet_probe
  usbnet: fix leak of transfer buffer of dev->interrupt
  ucc_geth: Add 16 bytes to max TX frame for VLANs
  net: ucc_geth, increase no. of HW RX descriptors
  netem: fix possible skb leak
  sky2: fix receive length error in mixed non-VLAN/VLAN traffic
  sky2: propogate rx hash when packet is copied
  net: fix two typos in skbuff.h
  cxgb3: Don't call cxgb_vlan_mode until q locks are initialized
  ixgbe: fix calling skb_put on nonlinear skb assertion bug
  ixgbe: Fix a memory leak in IEEE DCB
  igbvf: fix the bug when initializing the igbvf
  smsc75xx: enable mac to detect speed/duplex from phy
  smsc75xx: declare smsc75xx's MII as GMII capable
  smsc75xx: fix phy interrupt acknowledge
  smsc75xx: fix phy init reset loop
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_core.c              | 27
-rw-r--r--  net/bluetooth/hci_event.c             |  3
-rw-r--r--  net/bluetooth/mgmt.c                  |  2
-rw-r--r--  net/bridge/br_forward.c               |  1
-rw-r--r--  net/bridge/br_netfilter.c             |  8
-rw-r--r--  net/core/drop_monitor.c               | 88
-rw-r--r--  net/ieee802154/6lowpan.c              | 40
-rw-r--r--  net/ipv4/inet_diag.c                  |  2
-rw-r--r--  net/ipv4/tcp.c                        |  9
-rw-r--r--  net/ipv4/tcp_input.c                  | 13
-rw-r--r--  net/ipv4/udp_diag.c                   |  9
-rw-r--r--  net/l2tp/l2tp_ip.c                    |  3
-rw-r--r--  net/mac80211/ieee80211_i.h            |  2
-rw-r--r--  net/mac80211/iface.c                  |  4
-rw-r--r--  net/mac80211/mlme.c                   |  2
-rw-r--r--  net/mac80211/tx.c                     |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c       | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c        | 56
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c        |  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c       |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c      |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c      | 38
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c |  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c  |  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c  |  5
-rw-r--r--  net/netfilter/xt_CT.c                 |  2
-rw-r--r--  net/sched/sch_netem.c                 |  6
27 files changed, 246 insertions, 106 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e3786d..edfd61addcec 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
return NULL;
}
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
- return 1;
+ return true;
/* Debug keys are insecure so don't store them persistently */
if (key_type == HCI_LK_DEBUG_COMBINATION)
- return 0;
+ return false;
/* Changed combination key and there's no previous one */
if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
- return 0;
+ return false;
/* Security mode 3 case */
if (!conn)
- return 1;
+ return true;
/* Neither local nor remote side had no-bonding as requirement */
if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
- return 1;
+ return true;
/* Local side had dedicated bonding as requirement */
if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
- return 1;
+ return true;
/* Remote side had dedicated bonding as requirement */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
- return 1;
+ return true;
/* If none of the above criteria match, then don't store the key
* persistently */
- return 0;
+ return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
- u8 old_key_type, persistent;
+ u8 old_key_type;
+ bool persistent;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
@@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
mgmt_new_link_key(hdev, key, persistent);
- if (!persistent) {
- list_del(&key->list);
- kfree(key);
- }
+ if (conn)
+ conn->flush_key = !persistent;
return 0;
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..6c065254afc0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status == 0) {
+ if (conn->type == ACL_LINK && conn->flush_key)
+ hci_remove_link_key(hdev, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
}
@@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
case HCI_OP_USER_PASSKEY_NEG_REPLY:
hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
case HCI_OP_LE_SET_SCAN_PARAM:
hci_cc_le_set_scan_param(hdev, skb);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c69675..4bb03b111122 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
{
struct mgmt_ev_new_link_key ev;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..a2098e3de500 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
+ br_drop_fake_rtable(skb);
dev_queue_xmit(skb);
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..d7f49b63ab0f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
}
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct rtable *rt = skb_rtable(skb);
-
- if (rt && rt == bridge_parent_rtable(in))
- skb_dst_drop(skb);
-
+ br_drop_fake_rtable(skb);
return NF_ACCEPT;
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a609e5..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
* netlink alerts
*/
static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
struct per_cpu_dm_data {
struct work_struct dm_alert_work;
- struct sk_buff *skb;
+ struct sk_buff __rcu *skb;
atomic_t dm_hit_count;
struct timer_list send_timer;
+ int cpu;
};
struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
size_t al;
struct net_dm_alert_msg *msg;
struct nlattr *nla;
+ struct sk_buff *skb;
+ struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
al += sizeof(struct nlattr);
- data->skb = genlmsg_new(al, GFP_KERNEL);
- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- atomic_set(&data->dm_hit_count, dm_hit_limit);
+ skb = genlmsg_new(al, GFP_KERNEL);
+
+ if (skb) {
+ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ } else
+ schedule_work_on(data->cpu, &data->dm_alert_work);
+
+ /*
+ * Don't need to lock this, since we are guaranteed to only
+ * run this on a single cpu at a time.
+ * Note also that we only update data->skb if the old and new skb
+ * pointers don't match. This ensures that we don't continually call
+ * synchronize_rcu if we repeatedly fail to alloc a new netlink message.
+ */
+ if (skb != oskb) {
+ rcu_assign_pointer(data->skb, skb);
+
+ synchronize_rcu();
+
+ atomic_set(&data->dm_hit_count, dm_hit_limit);
+ }
+
}
static void send_dm_alert(struct work_struct *unused)
{
struct sk_buff *skb;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ WARN_ON_ONCE(data->cpu != smp_processor_id());
/*
* Grab the skb we're about to send
*/
- skb = data->skb;
+ skb = rcu_dereference_protected(data->skb, 1);
/*
* Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
/*
* Ship it!
*/
- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ if (skb)
+ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ put_cpu_var(dm_cpu_data);
}
/*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
*/
static void sched_send_work(unsigned long unused)
{
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ schedule_work_on(smp_processor_id(), &data->dm_alert_work);
- schedule_work(&data->dm_alert_work);
+ put_cpu_var(dm_cpu_data);
}
static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
struct nlmsghdr *nlh;
struct nlattr *nla;
int i;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct sk_buff *dskb;
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+ rcu_read_lock();
+ dskb = rcu_dereference(data->skb);
+ if (!dskb)
+ goto out;
if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
/*
@@ -144,7 +180,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
goto out;
}
- nlh = (struct nlmsghdr *)data->skb->data;
+ nlh = (struct nlmsghdr *)dskb->data;
nla = genlmsg_data(nlmsg_data(nlh));
msg = nla_data(nla);
for (i = 0; i < msg->entries; i++) {
@@ -158,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
/*
* We need to create a new entry
*/
- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
msg->points[msg->entries].count = 1;
@@ -170,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
}
out:
+ rcu_read_unlock();
+ put_cpu_var(dm_cpu_data);
return;
}
@@ -214,7 +252,7 @@ static int set_all_monitor_traces(int state)
struct dm_hw_stat_delta *new_stat = NULL;
struct dm_hw_stat_delta *temp;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
if (state == trace_state) {
rc = -EAGAIN;
@@ -253,7 +291,7 @@ static int set_all_monitor_traces(int state)
rc = -EINPROGRESS;
out_unlock:
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
return rc;
}
@@ -296,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
new_stat->dev = dev;
new_stat->last_rx = jiffies;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_add_rcu(&new_stat->list, &hw_stats_list);
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
case NETDEV_UNREGISTER:
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
new_stat->dev = NULL;
@@ -312,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
}
}
}
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
}
out:
@@ -368,13 +406,15 @@ static int __init init_net_drop_monitor(void)
for_each_present_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
- reset_per_cpu_data(data);
+ data->cpu = cpu;
INIT_WORK(&data->dm_alert_work, send_dm_alert);
init_timer(&data->send_timer);
data->send_timer.data = cpu;
data->send_timer.function = sched_send_work;
+ reset_per_cpu_data(data);
}
+
goto out;
out_unreg:
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 368515885368..840821b90bcd 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev)
free_netdev(dev);
}
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
@@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
+static struct ieee802154_mlme_ops lowpan_mlme = {
+ .get_pan_id = lowpan_get_pan_id,
+ .get_phy = lowpan_get_phy,
+ .get_short_addr = lowpan_get_short_addr,
+};
+
static void lowpan_setup(struct net_device *dev)
{
pr_debug("(%s)\n", __func__);
@@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev)
dev->netdev_ops = &lowpan_netdev_ops;
dev->header_ops = &lowpan_header_ops;
+ dev->ml_priv = &lowpan_mlme;
dev->destructor = lowpan_dev_free;
}
@@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ spin_lock_init(&flist_lock);
+
register_netdevice(dev);
return 0;
@@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry;
- struct lowpan_dev_record *tmp;
+ struct lowpan_dev_record *entry, *tmp;
+ struct lowpan_fragment *frame, *tframe;
ASSERT_RTNL();
+ spin_lock(&flist_lock);
+ list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+ del_timer(&frame->timer);
+ list_del(&frame->list);
+ dev_kfree_skb(frame->skb);
+ kfree(frame);
+ }
+ spin_unlock(&flist_lock);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557eb..8f8db724bfaf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto rtattr_failure;
if (icsk == NULL) {
- r->idiag_rqueue = r->idiag_wqueue = 0;
+ handler->idiag_get_info(sk, r, NULL);
goto out;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6adeb62c0..1272a88c2a63 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3243,7 +3243,7 @@ void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
- int max_share, cnt;
+ int max_rshare, max_wshare, cnt;
unsigned int i;
unsigned long jiffy = jiffies;
@@ -3303,15 +3303,16 @@ void __init tcp_init(void)
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
- max_share = min(4UL*1024*1024, limit);
+ max_wshare = min(4UL*1024*1024, limit);
+ max_rshare = min(6UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
- sysctl_tcp_wmem[2] = max(64*1024, max_share);
+ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
- sysctl_tcp_rmem[2] = max(87380, max_share);
+ sysctl_tcp_rmem[2] = max(87380, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ff364065376..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
int sysctl_tcp_stdurg __read_mostly;
@@ -495,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+ tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2868,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
/* Do not moderate cwnd if it's already undone in cwr or recovery. */
if (tp->undo_marker) {
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
- else /* PRR */
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+ /* PRR algorithm. */
tp->snd_cwnd = tp->snd_ssthresh;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info)
+{
+ r->idiag_rqueue = sk_rmem_alloc_get(sk);
+ r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
};
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
static const struct inet_diag_handler udplite_diag_handler = {
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 585d93ecee2d..6274f0be82b0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -442,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
daddr = lip->l2tp_addr.s_addr;
} else {
+ rc = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
- return -EDESTADDRREQ;
+ goto out;
daddr = inet->inet_daddr;
connected = 1;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f20..db8fae51714c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731e..c20051b7ffcd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+ } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ ieee80211_mgd_stop(sdata);
}
if (going_down)
@@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_rmc_free(sdata);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_mgd_teardown(sdata);
flushed = sta_info_flush(local, sdata);
WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76da5b3f5c5..20c680bfc3ae 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3497,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df4..e76facc69e95 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
return TX_DROP;
- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+ tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
if (!tx->sta)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..00bdb1d9d690 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
control_fail:
ip_vs_estimator_net_cleanup(net);
estimator_fail:
+ net->ipvs = NULL;
return -ENOMEM;
}
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
ip_vs_control_net_cleanup(net);
ip_vs_estimator_net_cleanup(net);
IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+ net->ipvs = NULL;
}
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
goto cleanup_dev;
}
+ ret = ip_vs_register_nl_ioctl();
+ if (ret < 0) {
+ pr_err("can't register netlink/ioctl.\n");
+ goto cleanup_hooks;
+ }
+
pr_info("ipvs loaded.\n");
return ret;
+cleanup_hooks:
+ nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
cleanup_dev:
unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
static void __exit ip_vs_cleanup(void)
{
+ ip_vs_unregister_nl_ioctl();
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
unregister_pernet_device(&ipvs_core_dev_ops);
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..f5589987fc80 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
return 0;
}
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
#else
int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
#endif
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
free_percpu(ipvs->tot_stats.cpustats);
}
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
{
- int idx;
int ret;
- EnterFunction(2);
-
- /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
- INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
- }
-
- smp_wmb(); /* Do we really need it now ? */
-
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
-
- ret = register_netdevice_notifier(&ip_vs_dst_notifier);
- if (ret < 0)
- goto err_notf;
-
- LeaveFunction(2);
return 0;
-err_notf:
- ip_vs_genl_unregister();
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
+void ip_vs_unregister_nl_ioctl(void)
+{
+ ip_vs_genl_unregister();
+ nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+ int idx;
+ int ret;
+
+ EnterFunction(2);
+
+ /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+ INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+ }
+
+ smp_wmb(); /* Do we really need it now ? */
+
+ ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+ if (ret < 0)
+ return ret;
+
+ LeaveFunction(2);
+ return 0;
+}
+
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_netdevice_notifier(&ip_vs_dst_notifier);
- ip_vs_genl_unregister();
- nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..e39f693dd3e4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
struct ip_vs_app *app;
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
if (!app)
return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..caa43704e55e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..548bf37aa29e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a8833250..ed835e67a07e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
- defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
- defined(CONFIG_IP_VS_PROTO_ESP)
/*
* register an ipvs protocols netns related data
*/
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
ipvs->proto_data_table[hash] = pd;
atomic_set(&pd->appcnt, 0); /* Init app counter */
- if (pp->init_netns != NULL)
- pp->init_netns(net, pd);
+ if (pp->init_netns != NULL) {
+ int ret = pp->init_netns(net, pd);
+ if (ret) {
+ /* unlink and free proto data */
+ ipvs->proto_data_table[hash] = pd->next;
+ kfree(pd);
+ return ret;
+ }
+ }
return 0;
}
-#endif
/*
* unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
*/
int __net_init ip_vs_protocol_net_init(struct net *net)
{
+ int i, ret;
+ static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+ &ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+ &ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+ &ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
- register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+ &ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+ &ip_vs_protocol_esp,
#endif
+ };
+
+ for (i = 0; i < ARRAY_SIZE(protos); i++) {
+ ret = register_ip_vs_proto_netns(net, protos[i]);
+ if (ret < 0)
+ goto cleanup;
+ }
return 0;
+
+cleanup:
+ ip_vs_protocol_net_cleanup(net);
+ return ret;
}
void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->sctp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
sizeof(sctp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->tcp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
sizeof(tcp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
pd->tcp_state_table = tcp_states;
+ return 0;
}
static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
}
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->udp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
sizeof(udp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..3746d8b9a478 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- if (info->timeout) {
+ if (info->timeout[0]) {
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct nf_conn_timeout *timeout_ext;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..ebd22966f748 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb))) {
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
+ skb_checksum_help(skb)))
+ return qdisc_drop(skb, sch);
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}