author     Linus Torvalds <torvalds@linux-foundation.org>  2015-11-11 05:11:41 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-11 05:11:41 +0300
commit     2df4ee78d042ee3d17cbebd51e31b300286549dc (patch)
tree       7c723c99569e1f1a81490d7b31e5d6af27b6d169 /net
parent     3419b45039c6b799c974a8019361c045e7ca232c (diff)
parent     8a921265e2cd31e61a0c2eda582af54c5bfef897 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix null deref in xt_TEE netfilter module, from Eric Dumazet.

 2) Several spots need to get to the original listener for SYN-ACK
    packets; most spots got this right but some did not. Whilst covering
    the remaining cases, create a helper to do this. From Eric Dumazet.

 3) Missing check of return value from alloc_netdev() in CAIF SPI code,
    from Rasmus Villemoes.

 4) Don't sleep while != TASK_RUNNING in macvtap, from Vlad Yasevich.

 5) Use after free in mvneta driver, from Justin Maggard.

 6) Fix race on dst->flags access in dst_release(), from Eric Dumazet.

 7) Add missing ZLIB_INFLATE dependency for new qed driver, from Arnd
    Bergmann.

 8) Fix multicast getsockopt deadlock, from WANG Cong.

 9) Fix deadlock in btusb, from Kuba Pawlak.

10) Some ipv6_add_dev() failure paths were not cleaning up the SNMP6
    counter state. From Sabrina Dubroca.

11) Fix packet_bind() race, which can cause lost notifications, from
    Francesco Ruggeri.

12) Fix MAC restoration in qlcnic driver during bonding mode changes,
    from Jarod Wilson.

13) Revert bridging forward delay change which broke libvirt and other
    userspace things, from Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  Revert "bridge: Allow forward delay to be cfgd when STP enabled"
  bpf_trace: Make dependent on PERF_EVENTS
  qed: select ZLIB_INFLATE
  net: fix a race in dst_release()
  net: mvneta: Fix memory use after free.
  net: Documentation: Fix default value tcp_limit_output_bytes
  macvtap: Resolve possible __might_sleep warning in macvtap_do_read()
  mvneta: add FIXED_PHY dependency
  net: caif: check return value of alloc_netdev
  net: hisilicon: NET_VENDOR_HISILICON should depend on HAS_DMA
  drivers: net: xgene: fix RGMII 10/100Mb mode
  netfilter: nft_meta: use skb_to_full_sk() helper
  net_sched: em_meta: use skb_to_full_sk() helper
  sched: cls_flow: use skb_to_full_sk() helper
  netfilter: xt_owner: use skb_to_full_sk() helper
  smack: use skb_to_full_sk() helper
  net: add skb_to_full_sk() helper and use it in selinux_netlbl_skbuff_setsid()
  bpf: doc: correct arch list for supported eBPF JIT
  dwc_eth_qos: Delete an unnecessary check before the function call "of_node_put"
  bonding: fix panic on non-ARPHRD_ETHER enslave failure
  ...
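Several of the hunks below (nf_defrag_ipv4, nft_meta, xt_owner, cls_flow, em_meta) replace direct skb->sk dereferences with the skb_to_full_sk() helper mentioned in item 2, so that a SYN-ACK packet whose skb->sk is still a request_sock is resolved to the original listener before socket fields are read. The following is only a sketch of what such a helper looks like, inferred from its callers in this diff; the exact header placement and config guards are not part of this patch set and may differ:

/*
 * Sketch (not the verbatim upstream definition): a SYN-ACK is attached to a
 * request_sock in TCP_NEW_SYN_RECV state, so map it back to the listener.
 * Callers still check sk_fullsock() afterwards to exclude timewait sockets
 * before touching sk_socket and related fields.
 */
static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
		sk = inet_reqsk(sk)->rsk_listener;
	return sk;
}

A typical caller then reads "sk = skb_to_full_sk(skb); if (!sk || !sk_fullsock(sk)) goto err;", as in the nft_meta and em_meta changes below.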
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_core.c              17
-rw-r--r--  net/bluetooth/l2cap_core.c            20
-rw-r--r--  net/bridge/br_stp.c                   13
-rw-r--r--  net/core/dev.c                         7
-rw-r--r--  net/core/dst.c                         2
-rw-r--r--  net/ipv4/fib_semantics.c              13
-rw-r--r--  net/ipv4/igmp.c                       12
-rw-r--r--  net/ipv4/ip_sockglue.c                45
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c    5
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c             4
-rw-r--r--  net/ipv4/tcp_ipv4.c                    2
-rw-r--r--  net/ipv4/tcp_minisocks.c               3
-rw-r--r--  net/ipv6/addrconf.c                    1
-rw-r--r--  net/ipv6/tcp_ipv6.c                   20
-rw-r--r--  net/netfilter/nf_nat_redirect.c        2
-rw-r--r--  net/netfilter/nfnetlink.c              2
-rw-r--r--  net/netfilter/nft_meta.c              36
-rw-r--r--  net/netfilter/xt_TEE.c                 6
-rw-r--r--  net/netfilter/xt_owner.c               6
-rw-r--r--  net/packet/af_packet.c                80
-rw-r--r--  net/sched/cls_flow.c                  15
-rw-r--r--  net/sched/em_meta.c                  138
-rw-r--r--  net/vmw_vsock/vmci_transport.c         2
23 files changed, 288 insertions, 163 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 83a6aacfab31..62edbf1b114e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -508,12 +508,6 @@ static void le_setup(struct hci_request *req)
/* Read LE Supported States */
hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
- /* Read LE White List Size */
- hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
-
- /* Clear LE White List */
- hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
-
/* LE-only controllers have LE implicitly enabled */
if (!lmp_bredr_capable(hdev))
hci_dev_set_flag(hdev, HCI_LE_ENABLED);
@@ -832,6 +826,17 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}
+ if (hdev->commands[26] & 0x40) {
+ /* Read LE White List Size */
+ hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
+ 0, NULL);
+ }
+
+ if (hdev->commands[26] & 0x80) {
+ /* Clear LE White List */
+ hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
+ }
+
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
/* Read LE Maximum Data Length */
hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 7c65ee200c29..66e8b6ee19a5 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -239,7 +239,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
else
dyn_end = L2CAP_CID_DYN_END;
- for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
+ for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
if (!__l2cap_get_chan_by_scid(conn, cid))
return cid;
}
@@ -5250,7 +5250,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
credits = __le16_to_cpu(rsp->credits);
result = __le16_to_cpu(rsp->result);
- if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
+ if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
+ dcid < L2CAP_CID_DYN_START ||
+ dcid > L2CAP_CID_LE_DYN_END))
return -EPROTO;
BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
@@ -5270,6 +5272,11 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
switch (result) {
case L2CAP_CR_SUCCESS:
+ if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+ err = -EBADSLT;
+ break;
+ }
+
chan->ident = 0;
chan->dcid = dcid;
chan->omtu = mtu;
@@ -5437,9 +5444,16 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
goto response_unlock;
}
+ /* Check for valid dynamic CID range */
+ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
+ result = L2CAP_CR_INVALID_SCID;
+ chan = NULL;
+ goto response_unlock;
+ }
+
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
- result = L2CAP_CR_NO_MEM;
+ result = L2CAP_CR_SCID_IN_USE;
chan = NULL;
goto response_unlock;
}
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 80c34d70218c..f7e8dee64fc8 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -600,12 +600,17 @@ void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
-
- if (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)
- return -ERANGE;
+ int err = -ERANGE;
spin_lock_bh(&br->lock);
+ if (br->stp_enabled != BR_NO_STP &&
+ (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
+ goto unlock;
+
__br_set_forward_delay(br, t);
+ err = 0;
+
+unlock:
spin_unlock_bh(&br->lock);
- return 0;
+ return err;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ce3f74cd6b9..ab9b8d0d115e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6402,7 +6402,7 @@ int __netdev_update_features(struct net_device *dev)
struct net_device *upper, *lower;
netdev_features_t features;
struct list_head *iter;
- int err = 0;
+ int err = -1;
ASSERT_RTNL();
@@ -6419,7 +6419,7 @@ int __netdev_update_features(struct net_device *dev)
features = netdev_sync_upper_features(dev, upper, features);
if (dev->features == features)
- return 0;
+ goto sync_lower;
netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
&dev->features, &features);
@@ -6434,6 +6434,7 @@ int __netdev_update_features(struct net_device *dev)
return -1;
}
+sync_lower:
/* some features must be disabled on lower devices when disabled
* on an upper device (think: bonding master or bridge)
*/
@@ -6443,7 +6444,7 @@ int __netdev_update_features(struct net_device *dev)
if (!err)
dev->features = features;
- return 1;
+ return err < 0 ? 0 : 1;
}
/**
diff --git a/net/core/dst.c b/net/core/dst.c
index 2a1818065e12..e6dc77252fe9 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -306,7 +306,7 @@ void dst_release(struct dst_entry *dst)
if (unlikely(newrefcnt < 0))
net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
__func__, dst, newrefcnt);
- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+ if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
call_rcu(&dst->rcu_head, dst_destroy_rcu);
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3e87447e65c7..d97268e8ff10 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -923,14 +923,21 @@ static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
fib_prefsrc != cfg->fc_dst) {
u32 tb_id = cfg->fc_table;
+ int rc;
if (tb_id == RT_TABLE_MAIN)
tb_id = RT_TABLE_LOCAL;
- if (inet_addr_type_table(cfg->fc_nlinfo.nl_net,
- fib_prefsrc, tb_id) != RTN_LOCAL) {
- return false;
+ rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+ fib_prefsrc, tb_id);
+
+ if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
+ rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+ fib_prefsrc, RT_TABLE_LOCAL);
}
+
+ if (rc != RTN_LOCAL)
+ return false;
}
return true;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 64aaf3522a59..6baf36e11808 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2392,11 +2392,11 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
+ ASSERT_RTNL();
+
if (!ipv4_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
-
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
@@ -2417,7 +2417,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
goto done;
msf->imsf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
- rtnl_unlock();
if (!psl) {
len = 0;
count = 0;
@@ -2436,7 +2435,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
return -EFAULT;
return 0;
done:
- rtnl_unlock();
return err;
}
@@ -2450,6 +2448,8 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
+ ASSERT_RTNL();
+
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET)
return -EINVAL;
@@ -2457,8 +2457,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!ipv4_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
-
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
@@ -2470,7 +2468,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
goto done;
gsf->gf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
- rtnl_unlock();
count = psl ? psl->sl_count : 0;
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
gsf->gf_numsrc = count;
@@ -2490,7 +2487,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
}
return 0;
done:
- rtnl_unlock();
return err;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c3c359ad66e3..5f73a7c03e27 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1251,11 +1251,22 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
* the _received_ ones. The set sets the _sent_ ones.
*/
+static bool getsockopt_needs_rtnl(int optname)
+{
+ switch (optname) {
+ case IP_MSFILTER:
+ case MCAST_MSFILTER:
+ return true;
+ }
+ return false;
+}
+
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen, unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
- int val;
+ bool needs_rtnl = getsockopt_needs_rtnl(optname);
+ int val, err = 0;
int len;
if (level != SOL_IP)
@@ -1269,6 +1280,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
if (len < 0)
return -EINVAL;
+ if (needs_rtnl)
+ rtnl_lock();
lock_sock(sk);
switch (optname) {
@@ -1386,39 +1399,35 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MSFILTER:
{
struct ip_msfilter msf;
- int err;
if (len < IP_MSFILTER_SIZE(0)) {
- release_sock(sk);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
- release_sock(sk);
- return -EFAULT;
+ err = -EFAULT;
+ goto out;
}
err = ip_mc_msfget(sk, &msf,
(struct ip_msfilter __user *)optval, optlen);
- release_sock(sk);
- return err;
+ goto out;
}
case MCAST_MSFILTER:
{
struct group_filter gsf;
- int err;
if (len < GROUP_FILTER_SIZE(0)) {
- release_sock(sk);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
- release_sock(sk);
- return -EFAULT;
+ err = -EFAULT;
+ goto out;
}
err = ip_mc_gsfget(sk, &gsf,
(struct group_filter __user *)optval,
optlen);
- release_sock(sk);
- return err;
+ goto out;
}
case IP_MULTICAST_ALL:
val = inet->mc_all;
@@ -1485,6 +1494,12 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -EFAULT;
}
return 0;
+
+out:
+ release_sock(sk);
+ if (needs_rtnl)
+ rtnl_unlock();
+ return err;
}
int ip_getsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 0e5591c2ee9f..6fb869f646bf 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -67,10 +67,9 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
const struct nf_hook_state *state)
{
struct sock *sk = skb->sk;
- struct inet_sock *inet = inet_sk(skb->sk);
- if (sk && (sk->sk_family == PF_INET) &&
- inet->nodefrag)
+ if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) &&
+ inet_sk(sk)->nodefrag)
return NF_ACCEPT;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 25300c5e283b..a0bd7a55193e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -48,14 +48,14 @@ static void set_local_port_range(struct net *net, int range[2])
{
bool same_parity = !((range[0] ^ range[1]) & 1);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
if (same_parity && !net->ipv4.ip_local_ports.warned) {
net->ipv4.ip_local_ports.warned = true;
pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
}
net->ipv4.ip_local_ports.range[0] = range[0];
net->ipv4.ip_local_ports.range[1] = range[1];
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
}
/* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c2648bbac4b..950e28c0cdf2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1326,6 +1326,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+ if (*own_req)
+ tcp_move_syn(newtp, req);
return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3575dd1e5b67..ac6b1961ffeb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -551,9 +551,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rack.mstamp.v64 = 0;
newtp->rack.advanced = 0;
- newtp->saved_syn = req->saved_syn;
- req->saved_syn = NULL;
-
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
return newsk;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d72fa90d6feb..d84742f003a9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -418,6 +418,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (err) {
ipv6_mc_destroy_dev(ndev);
del_timer(&ndev->regen_timer);
+ snmp6_unregister_dev(ndev);
goto err_release;
}
/* protected by rtnl_lock */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ea2f4d5440b5..5baa8e754e41 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1140,14 +1140,18 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
goto out;
}
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
- /* Clone pktoptions received with SYN, if we own the req */
- if (*own_req && ireq->pktopts) {
- newnp->pktoptions = skb_clone(ireq->pktopts,
- sk_gfp_atomic(sk, GFP_ATOMIC));
- consume_skb(ireq->pktopts);
- ireq->pktopts = NULL;
- if (newnp->pktoptions)
- skb_set_owner_r(newnp->pktoptions, newsk);
+ if (*own_req) {
+ tcp_move_syn(newtp, req);
+
+ /* Clone pktoptions received with SYN, if we own the req */
+ if (ireq->pktopts) {
+ newnp->pktoptions = skb_clone(ireq->pktopts,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
+ consume_skb(ireq->pktopts);
+ ireq->pktopts = NULL;
+ if (newnp->pktoptions)
+ skb_set_owner_r(newnp->pktoptions, newsk);
+ }
}
return newsk;
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index 97b75f9bfbcd..d43869879fcf 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -55,7 +55,7 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
rcu_read_lock();
indev = __in_dev_get_rcu(skb->dev);
- if (indev != NULL) {
+ if (indev && indev->ifa_list) {
ifa = indev->ifa_list;
newdst = ifa->ifa_local;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index f1d9e887f5b1..46453ab318db 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -492,7 +492,7 @@ static int nfnetlink_bind(struct net *net, int group)
type = nfnl_group2type[group];
rcu_read_lock();
- ss = nfnetlink_get_subsys(type);
+ ss = nfnetlink_get_subsys(type << 8);
rcu_read_unlock();
if (!ss)
request_module("nfnetlink-subsys-%d", type);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e4ad2c24bc41..9dfaf4d55ee0 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -31,6 +31,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
const struct net_device *in = pkt->in, *out = pkt->out;
+ struct sock *sk;
u32 *dest = &regs->data[priv->dreg];
switch (priv->key) {
@@ -86,33 +87,35 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*(u16 *)dest = out->type;
break;
case NFT_META_SKUID:
- if (skb->sk == NULL || !sk_fullsock(skb->sk))
+ sk = skb_to_full_sk(skb);
+ if (!sk || !sk_fullsock(sk))
goto err;
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket == NULL ||
- skb->sk->sk_socket->file == NULL) {
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_socket == NULL ||
+ sk->sk_socket->file == NULL) {
+ read_unlock_bh(&sk->sk_callback_lock);
goto err;
}
*dest = from_kuid_munged(&init_user_ns,
- skb->sk->sk_socket->file->f_cred->fsuid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ sk->sk_socket->file->f_cred->fsuid);
+ read_unlock_bh(&sk->sk_callback_lock);
break;
case NFT_META_SKGID:
- if (skb->sk == NULL || !sk_fullsock(skb->sk))
+ sk = skb_to_full_sk(skb);
+ if (!sk || !sk_fullsock(sk))
goto err;
- read_lock_bh(&skb->sk->sk_callback_lock);
- if (skb->sk->sk_socket == NULL ||
- skb->sk->sk_socket->file == NULL) {
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_socket == NULL ||
+ sk->sk_socket->file == NULL) {
+ read_unlock_bh(&sk->sk_callback_lock);
goto err;
}
*dest = from_kgid_munged(&init_user_ns,
- skb->sk->sk_socket->file->f_cred->fsgid);
- read_unlock_bh(&skb->sk->sk_callback_lock);
+ sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&sk->sk_callback_lock);
break;
#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID: {
@@ -168,9 +171,10 @@ void nft_meta_get_eval(const struct nft_expr *expr,
break;
#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
- if (skb->sk == NULL || !sk_fullsock(skb->sk))
+ sk = skb_to_full_sk(skb);
+ if (!sk || !sk_fullsock(sk))
goto err;
- *dest = skb->sk->sk_classid;
+ *dest = sk->sk_classid;
break;
#endif
default:
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 899b06115fc5..3eff7b67cdf2 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -31,8 +31,9 @@ static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
+ int oif = info->priv ? info->priv->oif : 0;
- nf_dup_ipv4(par->net, skb, par->hooknum, &info->gw.in, info->priv->oif);
+ nf_dup_ipv4(par->net, skb, par->hooknum, &info->gw.in, oif);
return XT_CONTINUE;
}
@@ -42,8 +43,9 @@ static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
+ int oif = info->priv ? info->priv->oif : 0;
- nf_dup_ipv6(par->net, skb, par->hooknum, &info->gw.in6, info->priv->oif);
+ nf_dup_ipv6(par->net, skb, par->hooknum, &info->gw.in6, oif);
return XT_CONTINUE;
}
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index ca2e577ed8ac..1302b475abcb 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -14,6 +14,7 @@
#include <linux/skbuff.h>
#include <linux/file.h>
#include <net/sock.h>
+#include <net/inet_sock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>
@@ -33,8 +34,9 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_owner_match_info *info = par->matchinfo;
const struct file *filp;
+ struct sock *sk = skb_to_full_sk(skb);
- if (skb->sk == NULL || skb->sk->sk_socket == NULL)
+ if (sk == NULL || sk->sk_socket == NULL)
return (info->match ^ info->invert) == 0;
else if (info->match & info->invert & XT_OWNER_SOCKET)
/*
@@ -43,7 +45,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
*/
return false;
- filp = skb->sk->sk_socket->file;
+ filp = sk->sk_socket->file;
if (filp == NULL)
return ((info->match ^ info->invert) &
(XT_OWNER_UID | XT_OWNER_GID)) == 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 691660b9b7ef..af399cac5205 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2911,22 +2911,40 @@ static int packet_release(struct socket *sock)
* Attach a packet hook.
*/
-static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ __be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
struct net_device *dev_curr;
__be16 proto_curr;
bool need_rehook;
+ struct net_device *dev = NULL;
+ int ret = 0;
+ bool unlisted = false;
- if (po->fanout) {
- if (dev)
- dev_put(dev);
-
+ if (po->fanout)
return -EINVAL;
- }
lock_sock(sk);
spin_lock(&po->bind_lock);
+ rcu_read_lock();
+
+ if (name) {
+ dev = dev_get_by_name_rcu(sock_net(sk), name);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ } else if (ifindex) {
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
+ if (dev)
+ dev_hold(dev);
proto_curr = po->prot_hook.type;
dev_curr = po->prot_hook.dev;
@@ -2934,14 +2952,29 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
need_rehook = proto_curr != proto || dev_curr != dev;
if (need_rehook) {
- unregister_prot_hook(sk, true);
+ if (po->running) {
+ rcu_read_unlock();
+ __unregister_prot_hook(sk, true);
+ rcu_read_lock();
+ dev_curr = po->prot_hook.dev;
+ if (dev)
+ unlisted = !dev_get_by_index_rcu(sock_net(sk),
+ dev->ifindex);
+ }
po->num = proto;
po->prot_hook.type = proto;
- po->prot_hook.dev = dev;
- po->ifindex = dev ? dev->ifindex : 0;
- packet_cached_dev_assign(po, dev);
+ if (unlikely(unlisted)) {
+ dev_put(dev);
+ po->prot_hook.dev = NULL;
+ po->ifindex = -1;
+ packet_cached_dev_reset(po);
+ } else {
+ po->prot_hook.dev = dev;
+ po->ifindex = dev ? dev->ifindex : 0;
+ packet_cached_dev_assign(po, dev);
+ }
}
if (dev_curr)
dev_put(dev_curr);
@@ -2949,7 +2982,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
if (proto == 0 || !need_rehook)
goto out_unlock;
- if (!dev || (dev->flags & IFF_UP)) {
+ if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
@@ -2958,9 +2991,10 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
}
out_unlock:
+ rcu_read_unlock();
spin_unlock(&po->bind_lock);
release_sock(sk);
- return 0;
+ return ret;
}
/*
@@ -2972,8 +3006,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
{
struct sock *sk = sock->sk;
char name[15];
- struct net_device *dev;
- int err = -ENODEV;
/*
* Check legality
@@ -2983,19 +3015,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EINVAL;
strlcpy(name, uaddr->sa_data, sizeof(name));
- dev = dev_get_by_name(sock_net(sk), name);
- if (dev)
- err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
- return err;
+ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
struct sock *sk = sock->sk;
- struct net_device *dev = NULL;
- int err;
-
/*
* Check legality
@@ -3006,16 +3032,8 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
if (sll->sll_family != AF_PACKET)
return -EINVAL;
- if (sll->sll_ifindex) {
- err = -ENODEV;
- dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
- if (dev == NULL)
- goto out;
- }
- err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
-
-out:
- return err;
+ return packet_do_bind(sk, NULL, sll->sll_ifindex,
+ sll->sll_protocol ? : pkt_sk(sk)->num);
}
static struct proto packet_proto = {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 536838b657bf..fbfec6a18839 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -22,6 +22,7 @@
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <net/inet_sock.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
@@ -197,8 +198,11 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb)
static u32 flow_get_skuid(const struct sk_buff *skb)
{
- if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
- kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
+ struct sock *sk = skb_to_full_sk(skb);
+
+ if (sk && sk->sk_socket && sk->sk_socket->file) {
+ kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;
+
return from_kuid(&init_user_ns, skuid);
}
return 0;
@@ -206,8 +210,11 @@ static u32 flow_get_skuid(const struct sk_buff *skb)
static u32 flow_get_skgid(const struct sk_buff *skb)
{
- if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
- kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
+ struct sock *sk = skb_to_full_sk(skb);
+
+ if (sk && sk->sk_socket && sk->sk_socket->file) {
+ kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;
+
return from_kgid(&init_user_ns, skgid);
}
return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index b5294ce20cd4..f2aabc0089da 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -343,119 +343,145 @@ META_COLLECTOR(int_sk_refcnt)
META_COLLECTOR(int_sk_rcvbuf)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_rcvbuf;
+ dst->value = sk->sk_rcvbuf;
}
META_COLLECTOR(int_sk_shutdown)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_shutdown;
+ dst->value = sk->sk_shutdown;
}
META_COLLECTOR(int_sk_proto)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_protocol;
+ dst->value = sk->sk_protocol;
}
META_COLLECTOR(int_sk_type)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_type;
+ dst->value = sk->sk_type;
}
META_COLLECTOR(int_sk_rmem_alloc)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = sk_rmem_alloc_get(skb->sk);
+ dst->value = sk_rmem_alloc_get(sk);
}
META_COLLECTOR(int_sk_wmem_alloc)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = sk_wmem_alloc_get(skb->sk);
+ dst->value = sk_wmem_alloc_get(sk);
}
META_COLLECTOR(int_sk_omem_alloc)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = atomic_read(&skb->sk->sk_omem_alloc);
+ dst->value = atomic_read(&sk->sk_omem_alloc);
}
META_COLLECTOR(int_sk_rcv_qlen)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_receive_queue.qlen;
+ dst->value = sk->sk_receive_queue.qlen;
}
META_COLLECTOR(int_sk_snd_qlen)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_write_queue.qlen;
+ dst->value = sk->sk_write_queue.qlen;
}
META_COLLECTOR(int_sk_wmem_queued)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_wmem_queued;
+ dst->value = sk->sk_wmem_queued;
}
META_COLLECTOR(int_sk_fwd_alloc)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_forward_alloc;
+ dst->value = sk->sk_forward_alloc;
}
META_COLLECTOR(int_sk_sndbuf)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_sndbuf;
+ dst->value = sk->sk_sndbuf;
}
META_COLLECTOR(int_sk_alloc)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = (__force int) skb->sk->sk_allocation;
+ dst->value = (__force int) sk->sk_allocation;
}
META_COLLECTOR(int_sk_hash)
@@ -469,92 +495,112 @@ META_COLLECTOR(int_sk_hash)
META_COLLECTOR(int_sk_lingertime)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_lingertime / HZ;
+ dst->value = sk->sk_lingertime / HZ;
}
META_COLLECTOR(int_sk_err_qlen)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_error_queue.qlen;
+ dst->value = sk->sk_error_queue.qlen;
}
META_COLLECTOR(int_sk_ack_bl)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_ack_backlog;
+ dst->value = sk->sk_ack_backlog;
}
META_COLLECTOR(int_sk_max_ack_bl)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_max_ack_backlog;
+ dst->value = sk->sk_max_ack_backlog;
}
META_COLLECTOR(int_sk_prio)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_priority;
+ dst->value = sk->sk_priority;
}
META_COLLECTOR(int_sk_rcvlowat)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_rcvlowat;
+ dst->value = sk->sk_rcvlowat;
}
META_COLLECTOR(int_sk_rcvtimeo)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_rcvtimeo / HZ;
+ dst->value = sk->sk_rcvtimeo / HZ;
}
META_COLLECTOR(int_sk_sndtimeo)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_sndtimeo / HZ;
+ dst->value = sk->sk_sndtimeo / HZ;
}
META_COLLECTOR(int_sk_sendmsg_off)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_frag.offset;
+ dst->value = sk->sk_frag.offset;
}
META_COLLECTOR(int_sk_write_pend)
{
- if (skip_nonlocal(skb)) {
+ const struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk) {
*err = -1;
return;
}
- dst->value = skb->sk->sk_write_pending;
+ dst->value = sk->sk_write_pending;
}
/**************************************************************************
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 400d87294de3..0a369bb440e7 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1234,7 +1234,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
/* Callers of accept() will be be waiting on the listening socket, not
* the pending socket.
*/
- listener->sk_state_change(listener);
+ listener->sk_data_ready(listener);
return 0;