Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_conn.c | 9
-rw-r--r--  net/bluetooth/hci_event.c | 48
-rw-r--r--  net/bluetooth/hci_sock.c | 3
-rw-r--r--  net/bluetooth/hci_sync.c | 26
-rw-r--r--  net/can/isotp.c | 19
-rw-r--r--  net/ceph/messenger.c | 4
-rw-r--r--  net/core/datagram.c | 75
-rw-r--r--  net/core/dev.c | 73
-rw-r--r--  net/core/dev.h | 3
-rw-r--r--  net/core/neighbour.c | 67
-rw-r--r--  net/core/pktgen.c | 14
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/skbuff.c | 40
-rw-r--r--  net/core/stream.c | 12
-rw-r--r--  net/devlink/health.c | 30
-rw-r--r--  net/handshake/netlink.c | 30
-rw-r--r--  net/ipv4/af_inet.c | 10
-rw-r--r--  net/ipv4/esp4.c | 6
-rw-r--r--  net/ipv4/fib_semantics.c | 14
-rw-r--r--  net/ipv4/inet_connection_sock.c | 1
-rw-r--r--  net/ipv4/inet_hashtables.c | 24
-rw-r--r--  net/ipv4/tcp.c | 24
-rw-r--r--  net/ipv4/tcp_bpf.c | 12
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 26
-rw-r--r--  net/ipv4/tcp_recovery.c | 2
-rw-r--r--  net/ipv6/esp6.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/mac80211/key.c | 3
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mctp/route.c | 22
-rw-r--r--  net/mptcp/protocol.c | 43
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 14
-rw-r--r--  net/netfilter/nf_tables_api.c | 70
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/nft_inner.c | 1
-rw-r--r--  net/netfilter/nft_payload.c | 2
-rw-r--r--  net/netfilter/nft_set_pipapo.h | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 2
-rw-r--r--  net/nfc/llcp_core.c | 30
-rw-r--r--  net/nfc/nci/core.c | 5
-rw-r--r--  net/nfc/nci/spi.c | 2
-rw-r--r--  net/packet/af_packet.c | 7
-rw-r--r--  net/rfkill/core.c | 5
-rw-r--r--  net/rfkill/rfkill-gpio.c | 4
-rw-r--r--  net/sched/act_ct.c | 9
-rw-r--r--  net/sched/cls_u32.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 18
-rw-r--r--  net/smc/Kconfig | 1
-rw-r--r--  net/smc/af_smc.c | 5
-rw-r--r--  net/smc/smc_ib.c | 7
-rw-r--r--  net/smc/smc_ib.h | 2
-rw-r--r--  net/smc/smc_stats.h | 14
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 13
-rw-r--r--  net/sunrpc/rpc_pipe.c | 2
-rw-r--r--  net/sunrpc/svc.c | 155
-rw-r--r--  net/sunrpc/svc_xprt.c | 236
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 3
-rw-r--r--  net/tls/tls_main.c | 10
-rw-r--r--  net/tls/tls_sw.c | 19
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 18
-rw-r--r--  net/wireless/core.c | 2
-rw-r--r--  net/wireless/mlme.c | 3
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  net/xdp/xsk_queue.c | 10
-rw-r--r--  net/xfrm/xfrm_interface_core.c | 22
-rw-r--r--  net/xfrm/xfrm_policy.c | 27
70 files changed, 835 insertions(+), 566 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f20338db8..73470cc3518a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1627,6 +1627,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EOPNOTSUPP);
}
+ /* Reject outgoing connection to device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (!bacmp(&hdev->bdaddr, dst)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ dst);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 31d02b54eea1..1e1c9147356c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -26,6 +26,8 @@
/* Bluetooth HCI event handling. */
#include <asm/unaligned.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -3268,6 +3270,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ /* Reject incoming connection from device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ &ev->bdaddr);
+ hci_reject_conn(hdev, &ev->bdaddr);
+ return;
+ }
+
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
&flags);
@@ -4742,6 +4754,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
if (!conn)
goto unlock;
+ /* Ignore NULL link key against CVE-2020-26555 */
+ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
+ bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
+ &ev->bdaddr);
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_drop(conn);
@@ -5274,8 +5295,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* available, then do not declare that OOB data is
* present.
*/
- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
- !memcmp(data->hash256, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash256, ZERO_KEY, 16))
return 0x00;
return 0x02;
@@ -5285,8 +5306,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* not supported by the hardware, then check that if
* P-192 data values are present.
*/
- if (!memcmp(data->rand192, ZERO_KEY, 16) ||
- !memcmp(data->hash192, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash192, ZERO_KEY, 16))
return 0x00;
return 0x01;
@@ -5303,7 +5324,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
hci_conn_hold(conn);
@@ -5550,7 +5571,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
/* Reset the authentication requirement to unknown */
@@ -7021,6 +7042,14 @@ unlock:
hci_dev_unlock(hdev);
}
+static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
+{
+ u8 handle = PTR_UINT(data);
+
+ return hci_le_terminate_big_sync(hdev, handle,
+ HCI_ERROR_LOCAL_HOST_TERM);
+}
+
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -7065,16 +7094,17 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
rcu_read_lock();
}
+ rcu_read_unlock();
+
if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
* have been closed before the BIG creation
* has completed.
*/
- hci_le_terminate_big_sync(hdev, ev->handle,
- HCI_ERROR_LOCAL_HOST_TERM);
+ hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
+ UINT_PTR(ev->handle), NULL);
- rcu_read_unlock();
hci_dev_unlock(hdev);
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5e4f718073b7..3e7cd330d731 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -488,7 +488,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+ strnlen(hdev->name, sizeof(ni->name)), '\0');
opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
break;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index d06e07a0ea5a..a15ab0b874a9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -5369,6 +5369,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
int err = 0;
u16 handle = conn->handle;
+ bool disconnect = false;
struct hci_conn *c;
switch (conn->state) {
@@ -5399,24 +5400,15 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
hci_dev_unlock(hdev);
return 0;
case BT_BOUND:
- hci_dev_lock(hdev);
- hci_conn_failed(conn, reason);
- hci_dev_unlock(hdev);
- return 0;
+ break;
default:
- hci_dev_lock(hdev);
- conn->state = BT_CLOSED;
- hci_disconn_cfm(conn, reason);
- hci_conn_del(conn);
- hci_dev_unlock(hdev);
- return 0;
+ disconnect = true;
+ break;
}
hci_dev_lock(hdev);
- /* Check if the connection hasn't been cleanup while waiting
- * commands to complete.
- */
+ /* Check if the connection has been cleaned up concurrently */
c = hci_conn_hash_lookup_handle(hdev, handle);
if (!c || c != conn) {
err = 0;
@@ -5428,7 +5420,13 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
* or in case of LE it was still scanning so it can be cleanup
* safely.
*/
- hci_conn_failed(conn, reason);
+ if (disconnect) {
+ conn->state = BT_CLOSED;
+ hci_disconn_cfm(conn, reason);
+ hci_conn_del(conn);
+ } else {
+ hci_conn_failed(conn, reason);
+ }
unlock:
hci_dev_unlock(hdev);
diff --git a/net/can/isotp.c b/net/can/isotp.c
index f02b5d3e4733..d1c6f206f429 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -948,21 +948,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
-wait_free_buffer:
- /* we do not support multiple buffers - for now */
- if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
- return -EAGAIN;
+ while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+ /* we do not support multiple buffers - for now */
+ if (msg->msg_flags & MSG_DONTWAIT)
+ return -EAGAIN;
- /* wait for complete transmission of current pdu */
- err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
- if (err)
- goto err_event_drop;
-
- if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
if (so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
- goto wait_free_buffer;
+ /* wait for complete transmission of current pdu */
+ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ if (err)
+ goto err_event_drop;
}
/* PDU size > default => try max_pdu_size */
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 10a41cd9c523..3c8b78d9c4d1 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -459,8 +459,8 @@ int ceph_tcp_connect(struct ceph_connection *con)
set_sock_callbacks(sock, con);
con_sock_state_connecting(con);
- ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
- O_NONBLOCK);
+ ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+ O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
ceph_pr_addr(&con->peer_addr),
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 176eb5834746..103d46fa0eeb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -50,7 +50,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/uio.h>
+#include <linux/iov_iter.h>
#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
@@ -61,6 +61,7 @@
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
+#include <crypto/hash.h>
/*
* Is a socket 'connection oriented' ?
@@ -489,6 +490,24 @@ short_copy:
return 0;
}
+static size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+#ifdef CONFIG_CRYPTO_HASH
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+#else
+ return 0;
+#endif
+}
+
/**
* skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
* and update a hash.
@@ -716,6 +735,60 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
+static __always_inline
+size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_to_user(from + progress, iter_to, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(from, iter_to, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+static size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+ struct iov_iter *i)
+{
+ struct csum_state *csstate = _csstate;
+ __wsum sum;
+
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (unlikely(iov_iter_is_discard(i))) {
+ // can't use csum_memcpy() for that one - data is not copied
+ csstate->csum = csum_block_add(csstate->csum,
+ csum_partial(addr, bytes, 0),
+ csstate->off);
+ csstate->off += bytes;
+ return bytes;
+ }
+
+ sum = csum_shift(csstate->csum, csstate->off);
+
+ bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
+ copy_to_user_iter_csum,
+ memcpy_to_iter_csum);
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
+ return bytes;
+}
+
/**
* skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
* and update a checksum.
diff --git a/net/core/dev.c b/net/core/dev.c
index 85df22f05c38..9f3f8930c691 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
list_del(&name_node->list);
- netdev_name_node_del(name_node);
kfree(name_node->name);
netdev_name_node_free(name_node);
}
@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
+ netdev_name_node_del(name_node);
+ synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
return 0;
@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
@@ -390,6 +392,10 @@ static void list_netdevice(struct net_device *dev)
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock(&dev_base_lock);
+
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_add(net, name_node);
+
/* We reserved the ifindex, this can't fail */
WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
@@ -401,12 +407,16 @@ static void list_netdevice(struct net_device *dev)
*/
static void unlist_netdevice(struct net_device *dev, bool lock)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
xa_erase(&net->dev_by_index, dev->ifindex);
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_del(name_node);
+
/* Unlink dev from the device chain */
if (lock)
write_lock(&dev_base_lock);
@@ -1086,7 +1096,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
for_each_netdev(net, d) {
struct netdev_name_node *name_node;
- list_for_each_entry(name_node, &d->name_node->list, list) {
+
+ netdev_for_each_altname(d, name_node) {
if (!sscanf(name_node->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
@@ -1123,6 +1134,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
return -ENFILE;
}
+static int dev_prep_valid_name(struct net *net, struct net_device *dev,
+ const char *want_name, char *out_name)
+{
+ int ret;
+
+ if (!dev_valid_name(want_name))
+ return -EINVAL;
+
+ if (strchr(want_name, '%')) {
+ ret = __dev_alloc_name(net, want_name, out_name);
+ return ret < 0 ? ret : 0;
+ } else if (netdev_name_in_use(net, want_name)) {
+ return -EEXIST;
+ } else if (out_name != want_name) {
+ strscpy(out_name, want_name, IFNAMSIZ);
+ }
+
+ return 0;
+}
+
static int dev_alloc_name_ns(struct net *net,
struct net_device *dev,
const char *name)
@@ -1160,19 +1191,13 @@ EXPORT_SYMBOL(dev_alloc_name);
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- BUG_ON(!net);
-
- if (!dev_valid_name(name))
- return -EINVAL;
-
- if (strchr(name, '%'))
- return dev_alloc_name_ns(net, dev, name);
- else if (netdev_name_in_use(net, name))
- return -EEXIST;
- else if (dev->name != name)
- strscpy(dev->name, name, IFNAMSIZ);
+ char buf[IFNAMSIZ];
+ int ret;
- return 0;
+ ret = dev_prep_valid_name(net, dev, name, buf);
+ if (ret >= 0)
+ strscpy(dev->name, buf, IFNAMSIZ);
+ return ret;
}
/**
@@ -3292,15 +3317,19 @@ int skb_checksum_help(struct sk_buff *skb)
offset = skb_checksum_start_offset(skb);
ret = -EINVAL;
- if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+ if (unlikely(offset >= skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+ WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
+ offset, skb_headlen(skb));
goto out;
}
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
- if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
+ if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+ WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
+ offset + sizeof(__sum16), skb_headlen(skb));
goto out;
}
ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
@@ -11033,7 +11062,9 @@ EXPORT_SYMBOL(unregister_netdev);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex)
{
+ struct netdev_name_node *name_node;
struct net *net_old = dev_net(dev);
+ char new_name[IFNAMSIZ] = {};
int err, new_nsid;
ASSERT_RTNL();
@@ -11060,10 +11091,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- err = dev_get_valid_name(net, dev, pat);
+ err = dev_prep_valid_name(net, dev, pat, new_name);
if (err < 0)
goto out;
}
+ /* Check that none of the altnames conflicts. */
+ err = -EEXIST;
+ netdev_for_each_altname(dev, name_node)
+ if (netdev_name_in_use(net, name_node->name))
+ goto out;
/* Check that new_ifindex isn't used yet. */
if (new_ifindex) {
@@ -11131,6 +11167,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
netdev_adjacent_add_links(dev);
+ if (new_name[0]) /* Rename the netdev to prepared name */
+ strscpy(dev->name, new_name, IFNAMSIZ);
+
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
diff --git a/net/core/dev.h b/net/core/dev.h
index e075e198092c..fa2e9c5c4122 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -62,6 +62,9 @@ struct netdev_name_node {
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);
+#define netdev_for_each_altname(dev, namenode) \
+ list_for_each_entry((namenode), &(dev)->name_node->list, list)
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c09f091cbff..df81c1f0a570 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
- int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
- tbl->last_flush = jiffies;
+ WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
- int entries;
+ int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
- if (entries >= tbl->gc_thresh3 ||
- (entries >= tbl->gc_thresh2 &&
- time_after(now, tbl->last_flush + 5 * HZ))) {
- if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3) {
+ gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+ if (entries >= gc_thresh3 ||
+ (entries >= READ_ONCE(tbl->gc_thresh2) &&
+ time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = jiffies;
+
+ WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
- nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
- nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
- nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
- nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+ NDTA_PAD) ||
+ nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+ nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+ nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
- long flush_delta = now - tbl->last_flush;
- long rand_delta = now - tbl->last_rand;
+ long flush_delta = now - READ_ONCE(tbl->last_flush);
+ long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
- ndst.ndts_allocs += st->allocs;
- ndst.ndts_destroys += st->destroys;
- ndst.ndts_hash_grows += st->hash_grows;
- ndst.ndts_res_failed += st->res_failed;
- ndst.ndts_lookups += st->lookups;
- ndst.ndts_hits += st->hits;
- ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
- ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
- ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
- ndst.ndts_forced_gc_runs += st->forced_gc_runs;
- ndst.ndts_table_fulls += st->table_fulls;
+ ndst.ndts_allocs += READ_ONCE(st->allocs);
+ ndst.ndts_destroys += READ_ONCE(st->destroys);
+ ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
+ ndst.ndts_res_failed += READ_ONCE(st->res_failed);
+ ndst.ndts_lookups += READ_ONCE(st->lookups);
+ ndst.ndts_hits += READ_ONCE(st->hits);
+ ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
+ ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
+ ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
+ ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
+ ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
- tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+ WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
- tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+ WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
- tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+ WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
- tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+ WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f56b8d697014..4d1696677c48 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, " Flags: ");
for (i = 0; i < NR_PKT_FLAGS; i++) {
- if (i == F_FLOW_SEQ)
+ if (i == FLOW_SEQ_SHIFT)
if (!pkt_dev->cflows)
continue;
- if (pkt_dev->flags & (1 << i))
+ if (pkt_dev->flags & (1 << i)) {
seq_printf(seq, "%s ", pkt_flag_names[i]);
- else if (i == F_FLOW_SEQ)
- seq_puts(seq, "FLOW_RND ");
-
#ifdef CONFIG_XFRM
- if (i == F_IPSEC && pkt_dev->spi)
- seq_printf(seq, "spi:%u", pkt_dev->spi);
+ if (i == IPSEC_SHIFT && pkt_dev->spi)
+ seq_printf(seq, "spi:%u ", pkt_dev->spi);
#endif
+ } else if (i == FLOW_SEQ_SHIFT) {
+ seq_puts(seq, "FLOW_RND ");
+ }
}
seq_puts(seq, "\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4a2ec33bfb51..53c377d054f0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -5503,13 +5503,11 @@ static unsigned int
rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
- bool enabled = netdev_offload_xstats_enabled(dev, type);
-
return nla_total_size(0) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
nla_total_size(sizeof(u8)) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
- (enabled ? nla_total_size(sizeof(u8)) : 0) +
+ nla_total_size(sizeof(u8)) +
0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4eaf7ed0d1f4..2bfa6a7ba244 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
+#include <linux/iov_iter.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -6931,3 +6932,42 @@ out:
return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);
+
+static __always_inline
+size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+static __always_inline
+size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_from_user(iter_from, to + progress, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied;
+
+ if (WARN_ON_ONCE(!i->data_source))
+ return false;
+ copied = iterate_and_advance2(i, bytes, addr, csum,
+ copy_from_user_iter_csum,
+ memcpy_from_iter_csum);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
diff --git a/net/core/stream.c b/net/core/stream.c
index f5c4e47df165..96fbcb9bbb30 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
*/
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
- int err = 0;
+ int ret, err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
- (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
- (sk_stream_memory_free(sk) &&
- !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+ (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
diff --git a/net/devlink/health.c b/net/devlink/health.c
index 638cad8d5c65..51e6e81e31bb 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -58,7 +58,6 @@ struct devlink_health_reporter {
struct devlink *devlink;
struct devlink_port *devlink_port;
struct devlink_fmsg *dump_fmsg;
- struct mutex dump_lock; /* lock parallel read/write from dump buffers */
u64 graceful_period;
bool auto_recover;
bool auto_dump;
@@ -125,7 +124,6 @@ __devlink_health_reporter_create(struct devlink *devlink,
reporter->graceful_period = graceful_period;
reporter->auto_recover = !!ops->recover;
reporter->auto_dump = !!ops->dump;
- mutex_init(&reporter->dump_lock);
return reporter;
}
@@ -226,7 +224,6 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
static void
devlink_health_reporter_free(struct devlink_health_reporter *reporter)
{
- mutex_destroy(&reporter->dump_lock);
if (reporter->dump_fmsg)
devlink_fmsg_free(reporter->dump_fmsg);
kfree(reporter);
@@ -625,10 +622,10 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
}
if (reporter->auto_dump) {
- mutex_lock(&reporter->dump_lock);
+ devl_lock(devlink);
/* store current dump of current error, for later analysis */
devlink_health_do_dump(reporter, priv_ctx, NULL);
- mutex_unlock(&reporter->dump_lock);
+ devl_unlock(devlink);
}
if (!reporter->auto_recover)
@@ -1262,7 +1259,7 @@ out:
}
static struct devlink_health_reporter *
-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
{
const struct genl_info *info = genl_info_dump(cb);
struct devlink_health_reporter *reporter;
@@ -1272,10 +1269,12 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
if (IS_ERR(devlink))
return NULL;
- devl_unlock(devlink);
reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
- devlink_put(devlink);
+ if (!reporter) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ }
return reporter;
}
@@ -1284,16 +1283,20 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_health_reporter *reporter;
+ struct devlink *devlink;
int err;
- reporter = devlink_health_reporter_get_from_cb(cb);
+ reporter = devlink_health_reporter_get_from_cb_lock(cb);
if (!reporter)
return -EINVAL;
- if (!reporter->ops->dump)
+ devlink = reporter->devlink;
+ if (!reporter->ops->dump) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
return -EOPNOTSUPP;
+ }
- mutex_lock(&reporter->dump_lock);
if (!state->idx) {
err = devlink_health_do_dump(reporter, NULL, cb->extack);
if (err)
@@ -1309,7 +1312,8 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
unlock:
- mutex_unlock(&reporter->dump_lock);
+ devl_unlock(devlink);
+ devlink_put(devlink);
return err;
}
@@ -1326,9 +1330,7 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
if (!reporter->ops->dump)
return -EOPNOTSUPP;
- mutex_lock(&reporter->dump_lock);
devlink_health_dump_clear(reporter);
- mutex_unlock(&reporter->dump_lock);
return 0;
}
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index d0bc1dd8e65a..80c7302692c7 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
}
EXPORT_SYMBOL(handshake_genl_put);
-/*
- * dup() a kernel socket for use as a user space file descriptor
- * in the current process. The kernel socket must have an
- * instatiated struct file.
- *
- * Implicit argument: "current()"
- */
-static int handshake_dup(struct socket *sock)
-{
- struct file *file;
- int newfd;
-
- file = get_file(sock->file);
- newfd = get_unused_fd_flags(O_CLOEXEC);
- if (newfd < 0) {
- fput(file);
- return newfd;
- }
-
- fd_install(newfd, file);
- return newfd;
-}
-
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
goto out_status;
sock = req->hr_sk->sk_socket;
- fd = handshake_dup(sock);
+ fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
err = fd;
goto out_complete;
}
+
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
- fput(sock->file);
+ put_unused_fd(fd);
goto out_complete;
}
+ fd_install(fd, get_file(sock->file));
+
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3d2e30e20473..2713c9b06c4c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -597,7 +597,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
- sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -613,7 +612,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
- sk->sk_wait_pending--;
return timeo;
}
@@ -642,6 +640,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC) {
+ sk->sk_disconnects++;
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
@@ -696,6 +695,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
tcp_sk(sk)->fastopen_req &&
tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+ int dis = sk->sk_disconnects;
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
@@ -704,6 +704,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
+
+ if (dis != sk->sk_disconnects) {
+ err = -EPIPE;
+ goto out;
+ }
}
/* Connection was closed by RST, timeout, ICMP error
@@ -725,6 +730,7 @@ out:
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
+ sk->sk_disconnects++;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 2be2d4922557..4ccfc104f13a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -784,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1ea82bc33ef1..5eb1b8d302bb 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope)
{
struct fib_nh *nh;
+ __be32 saddr;
if (nhc->nhc_family != AF_INET)
return inet_select_addr(nhc->nhc_dev, 0, scope);
nh = container_of(nhc, struct fib_nh, nh_common);
- nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+ saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- return nh->nh_saddr;
+ WRITE_ONCE(nh->nh_saddr, saddr);
+ WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
+
+ return saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
- if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
- return nh->nh_saddr;
+ if (READ_ONCE(nh->nh_saddr_genid) ==
+ atomic_read(&net->ipv4.dev_addr_genid))
+ return READ_ONCE(nh->nh_saddr);
}
return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index aeebe8816689..394a498c2823 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1145,7 +1145,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
- newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
newicsk->icsk_bind2_hash = NULL;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c32f5e28758b..598c1b114d2c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -149,8 +149,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb2->family)
- return false;
+ if (sk->sk_family != tb2->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
+ tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
+ return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
+ sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
+ }
if (sk->sk_family == AF_INET6)
return ipv6_addr_equal(&tb2->v6_rcv_saddr,
@@ -819,19 +825,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
tb->l3mdev != l3mdev)
return false;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family) {
- if (sk->sk_family == AF_INET)
- return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
- tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
-
- return false;
- }
-
- if (sk->sk_family == AF_INET6)
- return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
-#endif
- return tb->rcv_saddr == sk->sk_rcv_saddr;
+ return inet_bind2_bucket_addr_match(tb, sk);
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3f66cdeef7de..3d3a24f79573 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -831,7 +831,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
*/
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0)
+ break;
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
break;
@@ -925,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
-/* In some cases, both sendmsg() could have added an skb to the write queue,
- * but failed adding payload on it. We need to remove it to consume less
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
* memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
- * epoll() users.
+ * epoll() users. Another reason is that tcp_write_xmit() does not like
+ * finding an empty skb in the write queue.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
@@ -1287,6 +1290,7 @@ new_segment:
wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ tcp_remove_empty_skb(sk);
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -2442,7 +2446,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
__sk_flush_backlog(sk);
} else {
tcp_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, last);
+ err = sk_wait_data(sk, &timeo, last);
+ if (err < 0) {
+ err = copied ? : err;
+ goto out;
+ }
}
if ((flags & MSG_PEEK) &&
@@ -2966,12 +2974,6 @@ int tcp_disconnect(struct sock *sk, int flags)
int old_state = sk->sk_state;
u32 seq;
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 327268203001..53b0d62fd2c2 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -307,6 +307,10 @@ msg_bytes_ready:
}
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ copied = data;
+ goto unlock;
+ }
if (data && !sk_psock_queue_empty(psock))
goto msg_bytes_ready;
copied = -EAGAIN;
@@ -317,6 +321,8 @@ out:
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
@@ -351,6 +357,10 @@ msg_bytes_ready:
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ ret = data;
+ goto unlock;
+ }
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
@@ -361,6 +371,8 @@ msg_bytes_ready:
copied = -EAGAIN;
}
ret = copied;
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8afb0950a697..804821d6bd4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2207,16 +2207,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
- if (flag & FLAG_SACK_RENEGING &&
- flag & FLAG_SND_UNA_ADVANCED) {
+ if (*ack_flag & FLAG_SACK_RENEGING &&
+ *ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@@ -2986,7 +2987,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
- if (tcp_check_sack_reneging(sk, flag))
+ if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27140e5cdc06..4167e8a48b60 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1869,6 +1869,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TLS_DEVICE
tail->decrypted != skb->decrypted ||
#endif
+ !mptcp_skb_can_collapse(tail, skb) ||
thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index aa0fc8c766e5..f0723460753c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2456,6 +2456,7 @@ static int tcp_mtu_probe(struct sock *sk)
/* build the payload, and be prepared to abort if this fails. */
if (tcp_clone_payload(sk, nskb, probe_size)) {
+ tcp_skb_tsorted_anchor_cleanup(nskb);
consume_skb(nskb);
return -1;
}
@@ -2541,6 +2542,18 @@ static bool tcp_pacing_check(struct sock *sk)
return true;
}
+static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
+{
+ const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
+
+ /* No skb in the rtx queue. */
+ if (!node)
+ return true;
+
+ /* Only one skb in rtx queue. */
+ return !node->rb_left && !node->rb_right;
+}
+
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* (These limits are doubled for retransmits)
@@ -2578,12 +2591,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit += extra_bytes;
}
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
- /* Always send skb if rtx queue is empty.
+ /* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
* after softirq/tasklet schedule.
* This helps when TX completions are delayed too much.
*/
- if (tcp_rtx_queue_empty(sk))
+ if (tcp_rtx_queue_empty_or_single_skb(sk))
return false;
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
@@ -2787,7 +2800,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- u32 timeout, rto_delta_us;
+ u32 timeout, timeout_us, rto_delta_us;
int early_retrans;
/* Don't do any loss probe on a Fast Open connection before 3WHS
@@ -2811,11 +2824,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
* sample is available then probe after TCP_TIMEOUT_INIT.
*/
if (tp->srtt_us) {
- timeout = usecs_to_jiffies(tp->srtt_us >> 2);
+ timeout_us = tp->srtt_us >> 2;
if (tp->packets_out == 1)
- timeout += TCP_RTO_MIN;
+ timeout_us += tcp_rto_min_us(sk);
else
- timeout += TCP_TIMEOUT_MIN;
+ timeout_us += TCP_TIMEOUT_MIN_US;
+ timeout = usecs_to_jiffies(timeout_us);
} else {
timeout = TCP_TIMEOUT_INIT;
}
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index acf4869c5d3b..bba10110fbbc 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
tp->rack.advanced = 0;
tcp_rack_detect_loss(sk, &timeout);
if (timeout) {
- timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
+ timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index fddd0cbdede1..2cc1a45742d8 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -831,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 41a680c76d2e..42fb6996b077 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -117,10 +117,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (likely(xdst->u.rt6.rt6i_idev))
- in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
rt6_uncached_list_del(&xdst->u.rt6);
+ if (likely(xdst->u.rt6.rt6i_idev))
+ in6_dev_put(xdst->u.rt6.rt6i_idev);
xfrm_dst_destroy(xdst);
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0665ff5e456e..a2db0585dce0 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -912,7 +912,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
ret = -EALREADY;
- goto unlock;
+ goto out;
}
key->local = sdata->local;
@@ -940,7 +940,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
out:
ieee80211_key_free_unused(key);
- unlock:
mutex_unlock(&sdata->local->key_mtx);
return ret;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e751cda5eef6..8f6b6f56b65b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
/* drop unicast public action frames when using MPF */
if (is_unicast_ether_addr(mgmt->da) &&
- ieee80211_is_public_action((void *)rx->skb->data,
- rx->skb->len))
+ ieee80211_is_protected_dual_of_public_action(rx->skb))
return -EACCES;
}
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ab62fe447038..7a47a58aa54b 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
{
struct mctp_route *tmp, *rt = NULL;
+ rcu_read_lock();
+
list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
/* TODO: add metrics */
if (mctp_rt_match_eid(tmp, dnet, daddr)) {
@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
}
}
+ rcu_read_unlock();
+
return rt;
}
static struct mctp_route *mctp_route_lookup_null(struct net *net,
struct net_device *dev)
{
- struct mctp_route *rt;
+ struct mctp_route *tmp, *rt = NULL;
- list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
- if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
- refcount_inc_not_zero(&rt->refs))
- return rt;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+ if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
+ refcount_inc_not_zero(&tmp->refs)) {
+ rt = tmp;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+
+ return rt;
}
static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c3b83cb390d9..886ab689a8ae 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1298,7 +1298,7 @@ alloc_skb:
if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (snd_una != msk->snd_nxt) {
+ if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
tcp_remove_empty_skb(ssk);
return 0;
}
@@ -1306,11 +1306,6 @@ alloc_skb:
zero_window_probe = true;
data_seq = snd_una - 1;
copy = 1;
-
- /* all mptcp-level data is acked, no skbs should be present into the
- * ssk write queue
- */
- WARN_ON_ONCE(reuse_skb);
}
copy = min_t(size_t, copy, info->limit - info->sent);
@@ -1339,7 +1334,6 @@ alloc_skb:
if (reuse_skb) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
mpext->data_len += copy;
- WARN_ON_ONCE(zero_window_probe);
goto out;
}
@@ -2354,6 +2348,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
#define MPTCP_CF_PUSH BIT(1)
#define MPTCP_CF_FASTCLOSE BIT(2)
+/* be sure to send a reset only if the caller asked for it, also
+ * clean completely the subflow status when the subflow reaches
+ * TCP_CLOSE state
+ */
+static void __mptcp_subflow_disconnect(struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ unsigned int flags)
+{
+ if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ (flags & MPTCP_CF_FASTCLOSE)) {
+ /* The MPTCP code never wait on the subflow sockets, TCP-level
+ * disconnect should never fail
+ */
+ WARN_ON_ONCE(tcp_disconnect(ssk, 0));
+ mptcp_subflow_ctx_reset(subflow);
+ } else {
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ }
+}
+
/* subflow sockets can be either outgoing (connect) or incoming
* (accept).
*
@@ -2391,7 +2405,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
- /* be sure to force the tcp_disconnect() path,
+ /* be sure to force the tcp_close path
* to generate the egress reset
*/
ssk->sk_lingertime = 0;
@@ -2401,11 +2415,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
- /* The MPTCP code never wait on the subflow sockets, TCP-level
- * disconnect should never fail
- */
- WARN_ON_ONCE(tcp_disconnect(ssk, 0));
- mptcp_subflow_ctx_reset(subflow);
+ __mptcp_subflow_disconnect(ssk, subflow, flags);
release_sock(ssk);
goto out;
@@ -3098,12 +3108,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
/* We are on the fastopen error path. We can't call straight into the
* subflows cleanup code due to lock nesting (we are already under
* msk->firstsocket lock).
@@ -3173,7 +3177,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif
- nsk->sk_wait_pending = 0;
__mptcp_init_sock(nsk);
msk = mptcp_sk(nsk);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 1d34d700bd09..920a5a29ae1d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
-static bool nf_flow_is_outdated(const struct flow_offload *flow)
-{
- return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
- !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
-}
-
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
+static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
+ const struct flow_offload *flow)
+{
+ return flow_table->type->gc && flow_table->type->gc(flow);
+}
+
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
- nf_flow_is_outdated(flow))
+ nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a72b6aeefb1b..29c651804cb2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3166,7 +3166,7 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
if (err < 0)
return err;
- if (!tb[NFTA_EXPR_DATA])
+ if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
return -EINVAL;
type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
@@ -5556,7 +5556,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- u64 timeout = 0;
nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
if (nest == NULL)
@@ -5592,15 +5591,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
htonl(*nft_set_ext_flags(ext))))
goto nla_put_failure;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
- timeout = *nft_set_ext_timeout(ext);
- if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
- nf_jiffies64_to_msecs(timeout),
- NFTA_SET_ELEM_PAD))
- goto nla_put_failure;
- } else if (set->flags & NFT_SET_TIMEOUT) {
- timeout = READ_ONCE(set->timeout);
- }
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+ nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
+ NFTA_SET_ELEM_PAD))
+ goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
u64 expires, now = get_jiffies_64();
@@ -5615,9 +5610,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
nf_jiffies64_to_msecs(expires),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
-
- if (reset)
- *nft_set_ext_expiration(ext) = now + timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
@@ -7615,6 +7607,16 @@ nla_put_failure:
return -1;
}
+static void audit_log_obj_reset(const struct nft_table *table,
+ unsigned int base_seq, unsigned int nentries)
+{
+ char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
+
+ audit_log_nfcfg(buf, table->family, nentries,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+}
+
struct nft_obj_filter {
char *table;
u32 type;
@@ -7629,8 +7631,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
+ unsigned int entries = 0;
struct nft_object *obj;
bool reset = false;
+ int rc = 0;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
@@ -7643,6 +7647,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
+ entries = 0;
list_for_each_entry_rcu(obj, &table->objects, list) {
if (!nft_is_active(net, obj))
goto cont;
@@ -7658,34 +7663,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
filter->type != NFT_OBJECT_UNSPEC &&
obj->ops->type->type != filter->type)
goto cont;
- if (reset) {
- char *buf = kasprintf(GFP_ATOMIC,
- "%s:%u",
- table->name,
- nft_net->base_seq);
-
- audit_log_nfcfg(buf,
- family,
- obj->handle,
- AUDIT_NFT_OP_OBJ_RESET,
- GFP_ATOMIC);
- kfree(buf);
- }
- if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWOBJ,
- NLM_F_MULTI | NLM_F_APPEND,
- table->family, table,
- obj, reset) < 0)
- goto done;
+ rc = nf_tables_fill_obj_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWOBJ,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, table,
+ obj, reset);
+ if (rc < 0)
+ break;
+ entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
+ if (reset && entries)
+ audit_log_obj_reset(table, nft_net->base_seq, entries);
+ if (rc < 0)
+ break;
}
-done:
rcu_read_unlock();
cb->args[0] = idx;
@@ -7790,7 +7788,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
audit_log_nfcfg(buf,
family,
- obj->handle,
+ 1,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 53c9e76473ba..f03f4d4d7d88 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -698,8 +698,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
+ enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
index 28e2873ba24e..928312d01eb1 100644
--- a/net/netfilter/nft_inner.c
+++ b/net/netfilter/nft_inner.c
@@ -298,6 +298,7 @@ static int nft_inner_init(const struct nft_ctx *ctx,
int err;
if (!tb[NFTA_INNER_FLAGS] ||
+ !tb[NFTA_INNER_NUM] ||
!tb[NFTA_INNER_HDRSIZE] ||
!tb[NFTA_INNER_TYPE] ||
!tb[NFTA_INNER_EXPR])
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 120f6d395b98..0a689c8e0295 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -179,7 +179,7 @@ void nft_payload_eval(const struct nft_expr *expr,
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
- if (!skb_mac_header_was_set(skb))
+ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
goto err;
if (skb_vlan_tag_present(skb) &&
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 25a75591583e..2e164a319945 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -147,7 +147,7 @@ struct nft_pipapo_match {
unsigned long * __percpu *scratch;
size_t bsize_max;
struct rcu_head rcu;
- struct nft_pipapo_field f[];
+ struct nft_pipapo_field f[] __counted_by(field_count);
};
/**
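The __counted_by(field_count) annotation added above ties the f[] flexible array to the structure's field_count member, so bounds-checking compilers and FORTIFY can flag accesses past the declared number of elements. Below is a minimal user-space sketch of the idea; the struct, its members and the no-op fallback macro are illustrative assumptions, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(member)	/* no-op fallback for compilers without the attribute */
#endif

struct match {
	unsigned int field_count;		/* number of elements in f[] */
	int f[] __counted_by(field_count);	/* flexible array bounded by field_count */
};

int main(void)
{
	unsigned int n = 4;
	struct match *m = malloc(sizeof(*m) + n * sizeof(m->f[0]));

	if (!m)
		return 1;
	m->field_count = n;
	for (unsigned int i = 0; i < n; i++)
		m->f[i] = (int)i;
	printf("last element: %d\n", m->f[m->field_count - 1]);
	free(m);
	return 0;
}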
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 2660ceab3759..e34662f4a71e 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (nft_set_elem_expired(&rbe->ext)) {
+ break;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 6705bb895e23..1dac28136e6a 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
llcp_sock = tmp_sock;
+ sock_hold(&llcp_sock->sk);
break;
}
}
read_unlock(&local->sockets.lock);
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
return llcp_sock;
}
@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
static
struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
- const u8 *sn, size_t sn_len)
+ const u8 *sn, size_t sn_len,
+ bool needref)
{
struct sock *sk;
struct nfc_llcp_sock *llcp_sock, *tmp_sock;
@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
llcp_sock = tmp_sock;
+ if (needref)
+ sock_hold(&llcp_sock->sk);
break;
}
}
@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
* to this service name.
*/
if (nfc_llcp_sock_from_sn(local, sock->service_name,
- sock->service_name_len) != NULL) {
+ sock->service_name_len,
+ false) != NULL) {
mutex_unlock(&local->sdp_lock);
return LLCP_SAP_MAX;
@@ -824,16 +824,7 @@ out:
static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
const u8 *sn, size_t sn_len)
{
- struct nfc_llcp_sock *llcp_sock;
-
- llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
-
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
- return llcp_sock;
+ return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
}
static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
}
llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
- service_name_len);
+ service_name_len,
+ true);
if (!llcp_sock) {
sap = 0;
goto add_snl;
@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
if (sap == LLCP_SAP_MAX) {
sap = 0;
+ nfc_llcp_sock_put(llcp_sock);
goto add_snl;
}
@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
pr_debug("%p %d\n", llcp_sock, sap);
+ nfc_llcp_sock_put(llcp_sock);
add_snl:
sdp = nfc_llcp_build_sdres_tlv(tid, sap);
if (sdp == NULL)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index fff755dde30d..6c9592d05120 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -909,6 +909,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
return -EINVAL;
}
+ if (protocol >= NFC_PROTO_MAX) {
+ pr_err("the requested nfc protocol is invalid\n");
+ return -EINVAL;
+ }
+
if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
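The new bound check matters because protocol is used as a shift count in (1 << protocol); shifting by a value at or beyond the bit width of int is undefined behaviour, so an out-of-range protocol must be rejected first. A stand-alone sketch of the guarded lookup, with the NFC_PROTO_MAX value assumed purely for illustration:

#include <stdio.h>

#define NFC_PROTO_MAX 7	/* illustrative value, not taken from the uapi headers */

static int supports_protocol(unsigned int supported_protocols,
			     unsigned int protocol)
{
	if (protocol >= NFC_PROTO_MAX)	/* reject before it becomes a shift count */
		return 0;
	return !!(supported_protocols & (1u << protocol));
}

int main(void)
{
	printf("proto 2:  %d\n", supports_protocol(0x5, 2));	/* 1: bit set */
	printf("proto 40: %d\n", supports_protocol(0x5, 40));	/* 0: rejected, no UB */
	return 0;
}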
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index 0935527d1d12..b68150c971d0 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
int ret;
skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8f97648d652f..a84e00b5904b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3607,7 +3607,12 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
- memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
+
+ /* Let __fortify_memcpy_chk() know the actual buffer size. */
+ memcpy(((struct sockaddr_storage *)sll)->__data +
+ offsetof(struct sockaddr_ll, sll_addr) -
+ offsetofend(struct sockaddr_ll, sll_family),
+ dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 08630896b6c8..14cc8fe8584b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1180,7 +1180,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
init_waitqueue_head(&data->read_wait);
mutex_lock(&rfkill_global_mutex);
- mutex_lock(&data->mtx);
/*
* start getting events from elsewhere but hold mtx to get
* startup events added first
@@ -1192,10 +1191,11 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
goto free;
rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ mutex_lock(&data->mtx);
list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
}
list_add(&data->list, &rfkill_fds);
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
file->private_data = data;
@@ -1203,7 +1203,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
free:
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index e9d1b2f2ff0a..5a81505fba9a 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -108,13 +108,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->reset_gpio = gpio;
- gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 7c652d14528b..fb52d6f9aff9 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -278,7 +278,16 @@ err_nat:
return err;
}
+static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+{
+ return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+ test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
+ !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+}
+
static struct nf_flowtable_type flowtable_ct = {
+ .gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.owner = THIS_MODULE,
};
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index da4c179a4d41..6663e971a13e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -366,7 +366,7 @@ static int u32_init(struct tcf_proto *tp)
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
- tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
+ tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
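The smaller allocation is correct because tc_u_common reaches its hash table through a pointer rather than a trailing flexible array, so struct_size() had been sizing extra storage that is never addressed through tp_c; sizeof(*tp_c) is all that is needed. A sketch of the distinction using made-up stand-in types, not the real tc_u_* structures:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct bucket { void *head; };

struct table {
	unsigned int divisor;
	struct bucket ht[];		/* embedded flexible array: size header + n elements */
};

struct common {
	int refcnt;
	struct table *hlist;		/* pointer to a separate allocation: plain sizeof() */
};

int main(void)
{
	/* flexible-array case: header plus 8 buckets in a single allocation */
	struct table *t = malloc(offsetof(struct table, ht) + 8 * sizeof(struct bucket));
	/* pointer case: only the struct itself */
	struct common *c = malloc(sizeof(*c));

	if (!t || !c)
		return 1;
	t->divisor = 8;
	c->hlist = t;
	printf("table with 8 buckets: %zu bytes, common: %zu bytes\n",
	       offsetof(struct table, ht) + 8 * sizeof(struct bucket), sizeof(*c));
	free(c);
	free(t);
	return 0;
}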
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3554085bc2be..880c5f16b29c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -902,6 +902,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
cl->cl_flags |= HFSC_USC;
}
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+ cl->cl_fsc = cl->cl_rsc;
+ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+ cl->cl_flags |= HFSC_FSC;
+}
+
static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
[TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) },
[TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) },
@@ -1011,10 +1019,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (parent == NULL)
return -ENOENT;
}
- if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
- NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
- return -EINVAL;
- }
if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
return -EINVAL;
@@ -1065,6 +1069,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cf_tree = RB_ROOT;
sch_tree_lock(sch);
+ /* Check if the inner class is a misconfigured 'rt' */
+ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+ NL_SET_ERR_MSG(extack,
+ "Forced curve change on parent 'rt' to 'sc'");
+ hfsc_upgrade_rt(parent);
+ }
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
list_add_tail(&cl->siblings, &parent->children);
if (parent->level == 0)
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index 1ab3c5a2c5ad..746be3996768 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -2,6 +2,7 @@
config SMC
tristate "SMC socket protocol family"
depends on INET && INFINIBAND
+ depends on m || ISM != m
help
SMC-R provides a "sockets over RDMA" solution making use of
RDMA over Converged Ethernet (RoCE) technology to upgrade
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index bacdd971615e..35ddebae8894 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1201,6 +1201,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
(struct smc_clc_msg_accept_confirm_v2 *)aclc;
struct smc_clc_first_contact_ext *fce =
smc_get_clc_first_contact_ext(clc_v2, false);
+ struct net *net = sock_net(&smc->sk);
int rc;
if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
@@ -1210,7 +1211,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
ini->smcrv2.uses_gateway = false;
} else {
- if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
ini->smcrv2.nexthop_mac,
&ini->smcrv2.uses_gateway))
@@ -2361,7 +2362,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
smc_find_ism_store_rc(rc, ini);
return (!rc) ? 0 : ini->rc;
}
- return SMC_CLC_DECL_NOSMCDEV;
+ return prfx_rc;
}
/* listen worker: finish RDMA setup */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 9b66d6aeeb1a..89981dbe46c9 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway)
{
struct neighbour *neigh = NULL;
@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr,
if (daddr == cpu_to_be32(INADDR_NONE))
goto out;
- rt = ip_route_output_flow(&init_net, &fl4, NULL);
+ rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto out;
if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
struct in_device *in_dev = __in_dev_get_rcu(ndev);
+ struct net *net = dev_net(ndev);
const struct in_ifaddr *ifa;
bool subnet_match = false;
@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
}
if (!subnet_match)
goto out;
- if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
+ if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
smcrv2->daddr,
smcrv2->nexthop_mac,
&smcrv2->uses_gateway))
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 4df5f8c8a0a1..ef8ac2b7546d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -112,7 +112,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
unsigned short vlan_id, u8 gid[], u8 *sgid_index,
struct smc_init_info_smcrv2 *smcrv2);
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway);
bool smc_ib_is_valid_local_systemid(void);
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
index aa8928975cc6..9d32058db2b5 100644
--- a/net/smc/smc_stats.h
+++ b/net/smc/smc_stats.h
@@ -92,13 +92,14 @@ do { \
typeof(_smc_stats) stats = (_smc_stats); \
typeof(_tech) t = (_tech); \
typeof(_len) l = (_len); \
- int _pos = fls64((l) >> 13); \
+ int _pos; \
typeof(_rc) r = (_rc); \
int m = SMC_BUF_MAX - 1; \
this_cpu_inc((*stats).smc[t].key ## _cnt); \
- if (r <= 0) \
+ if (r <= 0 || l <= 0) \
break; \
- _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ _pos = fls64((l - 1) >> 13); \
+ _pos = (_pos <= m) ? _pos : m; \
this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
@@ -138,9 +139,12 @@ while (0)
do { \
typeof(_len) _l = (_len); \
typeof(_tech) t = (_tech); \
- int _pos = fls((_l) >> 13); \
+ int _pos; \
int m = SMC_BUF_MAX - 1; \
- _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ if (_l <= 0) \
+ break; \
+ _pos = fls((_l - 1) >> 13); \
+ _pos = (_pos <= m) ? _pos : m; \
this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)
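With the corrected macros, the histogram bucket is fls64((len - 1) >> 13) capped at SMC_BUF_MAX - 1, so payloads of 1..8K bytes land in bucket 0, 8K+1..16K in bucket 1, 16K+1..32K in bucket 2, and so on, and exact powers of two no longer need the old special-case test. A stand-alone sketch of that arithmetic; the SMC_BUF_MAX value and the fls64() substitute built on __builtin_clzll() are assumptions for illustration:

#include <stdio.h>

#define SMC_BUF_MAX 5	/* illustrative bucket count, not the kernel's value */

static int fls64_approx(unsigned long long x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static int smc_stat_bucket(long long len)
{
	int m = SMC_BUF_MAX - 1;
	int pos;

	if (len <= 0)	/* mirrors the new guard against non-positive lengths */
		return -1;
	pos = fls64_approx((unsigned long long)(len - 1) >> 13);
	return pos <= m ? pos : m;
}

int main(void)
{
	long long samples[] = { 1, 8192, 8193, 16384, 16385, 1 << 20 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len=%lld -> bucket %d\n", samples[i], smc_stat_bucket(samples[i]));
	return 0;
}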
diff --git a/net/socket.c b/net/socket.c
index c4a6f5532955..5740475e084c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -403,7 +403,7 @@ static const struct xattr_handler sockfs_security_xattr_handler = {
.set = sockfs_security_xattr_set,
};
-static const struct xattr_handler *sockfs_xattr_handlers[] = {
+static const struct xattr_handler * const sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
&sockfs_security_xattr_handler,
NULL
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 65a6c6429a53..caa94cf57123 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
return NULL;
req->rq_xprt = xprt;
- INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
@@ -349,10 +348,8 @@ found:
}
/*
- * Add callback request to callback list. The callback
- * service sleeps on the sv_cb_waitq waiting for new
- * requests. Wake it up after adding enqueing the
- * request.
+ * Add callback request to callback list. Wake a thread
+ * on the first pool (usually the only pool) to handle it.
*/
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
@@ -369,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
dprintk("RPC: add callback request to list\n");
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
}
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f420d8457345..dcc2b4f49e77 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -472,7 +472,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 812fda9d45dd..3f2ea7a0496f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -438,9 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
static void
__svc_init_bc(struct svc_serv *serv)
{
- INIT_LIST_HEAD(&serv->sv_cb_list);
- spin_lock_init(&serv->sv_cb_lock);
- init_waitqueue_head(&serv->sv_cb_waitq);
+ lwq_init(&serv->sv_cb_list);
}
#else
static void
@@ -509,9 +507,9 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_sockets);
+ lwq_init(&pool->sp_xprts);
INIT_LIST_HEAD(&pool->sp_all_threads);
- spin_lock_init(&pool->sp_lock);
+ init_llist_head(&pool->sp_idle_threads);
percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
@@ -575,11 +573,12 @@ svc_destroy(struct kref *ref)
timer_shutdown_sync(&serv->sv_temptimer);
/*
- * The last user is gone and thus all sockets have to be destroyed to
- * the point. Check this.
+ * Remaining transports at this point are not expected.
*/
- BUG_ON(!list_empty(&serv->sv_permsocks));
- BUG_ON(!list_empty(&serv->sv_tempsocks));
+ WARN_ONCE(!list_empty(&serv->sv_permsocks),
+ "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ WARN_ONCE(!list_empty(&serv->sv_tempsocks),
+ "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
cache_clean_deferred(serv);
@@ -642,7 +641,6 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
folio_batch_init(&rqstp->rq_fbatch);
- __set_bit(RQ_BUSY, &rqstp->rq_flags);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -682,10 +680,13 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
serv->sv_nrthreads += 1;
spin_unlock_bh(&serv->sv_lock);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads++;
+ atomic_inc(&pool->sp_nrthreads);
+
+ /* Protected by whatever lock the service uses when calling
+ * svc_set_num_threads()
+ */
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
- spin_unlock_bh(&pool->sp_lock);
+
return rqstp;
}
@@ -701,23 +702,25 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
struct svc_rqst *rqstp;
+ struct llist_node *ln;
rcu_read_lock();
- list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
- if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
- continue;
-
+ ln = READ_ONCE(pool->sp_idle_threads.first);
+ if (ln) {
+ rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
- wake_up_process(rqstp->rq_task);
+ if (!task_is_running(rqstp->rq_task)) {
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ percpu_counter_inc(&pool->sp_threads_woken);
+ }
rcu_read_unlock();
- percpu_counter_inc(&pool->sp_threads_woken);
- trace_svc_wake_up(rqstp->rq_task->pid);
return;
}
rcu_read_unlock();
- set_bit(SP_CONGESTED, &pool->sp_flags);
}
+EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
@@ -725,36 +728,38 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
-static struct task_struct *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+static struct svc_pool *
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+ unsigned int *state)
{
+ struct svc_pool *pool;
unsigned int i;
- struct task_struct *task = NULL;
+
+retry:
+ pool = target_pool;
if (pool != NULL) {
- spin_lock_bh(&pool->sp_lock);
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
+ goto found_pool;
+ return NULL;
} else {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_all_threads))
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
goto found_pool;
- spin_unlock_bh(&pool->sp_lock);
}
return NULL;
}
found_pool:
- if (!list_empty(&pool->sp_all_threads)) {
- struct svc_rqst *rqstp;
-
- rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- set_bit(RQ_VICTIM, &rqstp->rq_flags);
- list_del_rcu(&rqstp->rq_all);
- task = rqstp->rq_task;
- }
- spin_unlock_bh(&pool->sp_lock);
- return task;
+ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ return pool;
+ /* Nothing left in this pool any more */
+ clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ goto retry;
}
static int
@@ -795,18 +800,16 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- struct svc_rqst *rqstp;
- struct task_struct *task;
unsigned int state = serv->sv_nrthreads-1;
+ struct svc_pool *victim;
do {
- task = svc_pool_victim(serv, pool, &state);
- if (task == NULL)
+ victim = svc_pool_victim(serv, pool, &state);
+ if (!victim)
break;
- rqstp = kthread_data(task);
- /* Did we lose a race to svo_function threadfn? */
- if (kthread_stop(task) == -EINTR)
- svc_exit_thread(rqstp);
+ svc_pool_wake_idle_thread(victim);
+ wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
+ TASK_IDLE);
nrservs++;
} while (nrservs < 0);
return 0;
@@ -832,13 +835,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- if (pool == NULL) {
+ if (!pool)
nrservs -= serv->sv_nrthreads;
- } else {
- spin_lock_bh(&pool->sp_lock);
- nrservs -= pool->sp_nrthreads;
- spin_unlock_bh(&pool->sp_lock);
- }
+ else
+ nrservs -= atomic_read(&pool->sp_nrthreads);
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -924,11 +924,9 @@ svc_exit_thread(struct svc_rqst *rqstp)
struct svc_serv *serv = rqstp->rq_server;
struct svc_pool *pool = rqstp->rq_pool;
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads--;
- if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
- list_del_rcu(&rqstp->rq_all);
- spin_unlock_bh(&pool->sp_lock);
+ list_del_rcu(&rqstp->rq_all);
+
+ atomic_dec(&pool->sp_nrthreads);
spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads -= 1;
@@ -938,6 +936,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
svc_rqst_free(rqstp);
svc_put(serv);
+ /* That svc_put() cannot be the last, because the thread
+ * waiting for SP_VICTIM_REMAINS to clear must hold
+ * a reference. So it is still safe to access pool.
+ */
+ clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
@@ -1544,24 +1547,20 @@ out_drop:
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-/*
- * Process a backchannel RPC request that arrived over an existing
- * outbound connection
+/**
+ * svc_process_bc - process a reverse-direction RPC request
+ * @req: RPC request to be used for client-side processing
+ * @rqstp: server-side execution context
+ *
*/
-int
-bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
- struct svc_rqst *rqstp)
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
struct rpc_task *task;
int proc_error;
- int error;
-
- dprintk("svc: %s(%p)\n", __func__, req);
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
- rqstp->rq_server = serv;
rqstp->rq_bc_net = req->rq_xprt->xprt_net;
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
@@ -1590,10 +1589,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
* been processed by the caller.
*/
svcxdr_init_decode(rqstp);
- if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
- error = -EINVAL;
- goto out;
- }
+ if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
+ return;
/* Parse and execute the bc call */
proc_error = svc_process_common(rqstp);
@@ -1602,26 +1599,18 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
- error = -EINVAL;
- goto out;
+ return;
}
/* Finally, send the reply synchronously */
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req);
- if (IS_ERR(task)) {
- error = PTR_ERR(task);
- goto out;
- }
+ if (IS_ERR(task))
+ return;
WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
- error = task->tk_status;
rpc_put_task(task);
-
-out:
- dprintk("svc: %s(), error=%d\n", __func__, error);
- return error;
}
-EXPORT_SYMBOL_GPL(bc_svc_process);
+EXPORT_SYMBOL_GPL(svc_process_bc);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4cfe9640df48..fee83d1024bc 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -9,7 +9,6 @@
#include <linux/sched/mm.h>
#include <linux/errno.h>
#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
@@ -17,6 +16,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>
@@ -201,7 +201,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
kref_init(&xprt->xpt_ref);
xprt->xpt_server = serv;
INIT_LIST_HEAD(&xprt->xpt_list);
- INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
@@ -472,9 +471,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server);
percpu_counter_inc(&pool->sp_sockets_queued);
- spin_lock_bh(&pool->sp_lock);
- list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
- spin_unlock_bh(&pool->sp_lock);
+ lwq_enqueue(&xprt->xpt_ready, &pool->sp_xprts);
svc_pool_wake_idle_thread(pool);
}
@@ -487,18 +484,9 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
struct svc_xprt *xprt = NULL;
- if (list_empty(&pool->sp_sockets))
- goto out;
-
- spin_lock_bh(&pool->sp_lock);
- if (likely(!list_empty(&pool->sp_sockets))) {
- xprt = list_first_entry(&pool->sp_sockets,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ xprt = lwq_dequeue(&pool->sp_xprts, struct svc_xprt, xpt_ready);
+ if (xprt)
svc_xprt_get(xprt);
- }
- spin_unlock_bh(&pool->sp_lock);
-out:
return xprt;
}
@@ -674,7 +662,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
continue;
set_current_state(TASK_IDLE);
- if (kthread_should_stop()) {
+ if (svc_thread_should_stop(rqstp)) {
set_current_state(TASK_RUNNING);
return false;
}
@@ -699,7 +687,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
static bool
-rqst_should_sleep(struct svc_rqst *rqstp)
+svc_thread_should_sleep(struct svc_rqst *rqstp)
{
struct svc_pool *pool = rqstp->rq_pool;
@@ -708,65 +696,51 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false;
/* was a socket queued? */
- if (!list_empty(&pool->sp_sockets))
+ if (!lwq_empty(&pool->sp_xprts))
return false;
/* are we shutting down? */
- if (kthread_should_stop())
+ if (svc_thread_should_stop(rqstp))
return false;
- /* are we freezing? */
- if (freezing(current))
- return false;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
+ return false;
+ }
+#endif
return true;
}
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
+static void svc_thread_wait_for_work(struct svc_rqst *rqstp)
{
- struct svc_pool *pool = rqstp->rq_pool;
-
- /* rq_xprt should be clear on entry */
- WARN_ON_ONCE(rqstp->rq_xprt);
-
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- set_current_state(TASK_IDLE);
- smp_mb__before_atomic();
- clear_bit(SP_CONGESTED, &pool->sp_flags);
- clear_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
-
- if (likely(rqst_should_sleep(rqstp)))
- schedule();
- else
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ if (svc_thread_should_sleep(rqstp)) {
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ llist_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+ if (likely(svc_thread_should_sleep(rqstp)))
+ schedule();
+
+ while (!llist_del_first_this(&pool->sp_idle_threads,
+ &rqstp->rq_idle)) {
+ /* Work just became available. This thread can only
+ * handle it after removing rqstp from the idle
+ * list. If that attempt failed, some other thread
+ * must have queued itself after finding no
+ * work to do, so that thread has taken responsibility
+ * for this new work. This thread can safely sleep
+ * until woken again.
+ */
+ schedule();
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ }
__set_current_state(TASK_RUNNING);
-
+ } else {
+ cond_resched();
+ }
try_to_freeze();
-
- set_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- if (kthread_should_stop())
- return NULL;
- return NULL;
-out_found:
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- /* Normally we will wait up to 5 seconds for any required
- * cache information to be provided.
- */
- if (!test_bit(SP_CONGESTED, &pool->sp_flags))
- rqstp->rq_chandle.thread_wait = 5*HZ;
- else
- rqstp->rq_chandle.thread_wait = 1*HZ;
- trace_svc_xprt_dequeue(rqstp);
- return rqstp->rq_xprt;
}
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -785,7 +759,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
svc_xprt_received(newxpt);
}
-static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+static void svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
struct svc_serv *serv = rqstp->rq_server;
int len = 0;
@@ -826,11 +800,35 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ if (len <= 0)
+ goto out;
+
+ trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+
+ clear_bit(XPT_OLD, &xprt->xpt_flags);
+
+ rqstp->rq_chandle.defer = svc_defer;
+
+ if (serv->sv_stats)
+ serv->sv_stats->netcnt++;
+ percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
+ rqstp->rq_stime = ktime_get();
+ svc_process(rqstp);
} else
svc_xprt_received(xprt);
out:
- return len;
+ rqstp->rq_res.len = 0;
+ svc_xprt_release(rqstp);
+}
+
+static void svc_thread_wake_next(struct svc_rqst *rqstp)
+{
+ if (!svc_thread_should_sleep(rqstp))
+ /* More work pending after I dequeued some,
+ * wake another worker
+ */
+ svc_pool_wake_idle_thread(rqstp->rq_pool);
}
/**
@@ -843,44 +841,51 @@ out:
*/
void svc_recv(struct svc_rqst *rqstp)
{
- struct svc_xprt *xprt = NULL;
- struct svc_serv *serv = rqstp->rq_server;
- int len;
+ struct svc_pool *pool = rqstp->rq_pool;
if (!svc_alloc_arg(rqstp))
- goto out;
+ return;
- try_to_freeze();
- cond_resched();
- if (kthread_should_stop())
- goto out;
+ svc_thread_wait_for_work(rqstp);
- xprt = svc_get_next_xprt(rqstp);
- if (!xprt)
- goto out;
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- len = svc_handle_xprt(rqstp, xprt);
+ if (svc_thread_should_stop(rqstp)) {
+ svc_thread_wake_next(rqstp);
+ return;
+ }
- /* No data, incomplete (TCP) read, or accept() */
- if (len <= 0)
- goto out_release;
+ rqstp->rq_xprt = svc_xprt_dequeue(pool);
+ if (rqstp->rq_xprt) {
+ struct svc_xprt *xprt = rqstp->rq_xprt;
- trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+ svc_thread_wake_next(rqstp);
+ /* Normally we will wait up to 5 seconds for any required
+ * cache information to be provided. When there are no
+ * idle threads, we reduce the wait time.
+ */
+ if (pool->sp_idle_threads.first)
+ rqstp->rq_chandle.thread_wait = 5 * HZ;
+ else
+ rqstp->rq_chandle.thread_wait = 1 * HZ;
- clear_bit(XPT_OLD, &xprt->xpt_flags);
+ trace_svc_xprt_dequeue(rqstp);
+ svc_handle_xprt(rqstp, xprt);
+ }
- rqstp->rq_chandle.defer = svc_defer;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
- if (serv->sv_stats)
- serv->sv_stats->netcnt++;
- percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
- rqstp->rq_stime = ktime_get();
- svc_process(rqstp);
-out:
- return;
-out_release:
- rqstp->rq_res.len = 0;
- svc_xprt_release(rqstp);
+ req = lwq_dequeue(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ if (req) {
+ svc_thread_wake_next(rqstp);
+ svc_process_bc(req, rqstp);
+ }
+ }
+#endif
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -890,7 +895,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
void svc_drop(struct svc_rqst *rqstp)
{
trace_svc_drop(rqstp);
- svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
@@ -906,8 +910,6 @@ void svc_send(struct svc_rqst *rqstp)
int status;
xprt = rqstp->rq_xprt;
- if (!xprt)
- return;
/* calculate over-all length */
xb = &rqstp->rq_res;
@@ -920,7 +922,6 @@ void svc_send(struct svc_rqst *rqstp)
status = xprt->xpt_ops->xpo_sendto(rqstp);
trace_svc_send(rqstp, status);
- svc_xprt_release(rqstp);
}
/*
@@ -1031,7 +1032,6 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
@@ -1082,36 +1082,26 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
return ret;
}
-static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
- struct svc_pool *pool;
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
int i;
for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
-
- spin_lock_bh(&pool->sp_lock);
- list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
- if (xprt->xpt_net != net)
- continue;
- list_del_init(&xprt->xpt_ready);
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ struct svc_pool *pool = &serv->sv_pools[i];
+ struct llist_node *q, **t1, *t2;
+
+ q = lwq_dequeue_all(&pool->sp_xprts);
+ lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
+ if (xprt->xpt_net == net) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_delete_xprt(xprt);
+ xprt = NULL;
+ }
}
- spin_unlock_bh(&pool->sp_lock);
- }
- return NULL;
-}
-
-static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
-{
- struct svc_xprt *xprt;
- while ((xprt = svc_dequeue_net(serv, net))) {
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_delete_xprt(xprt);
+ if (q)
+ lwq_enqueue_batch(q, &pool->sp_xprts);
}
}
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index e4d84a13c566..8c817e755262 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -263,11 +263,9 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
r_xprt->rx_stats.bcall_count++;
return;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 85c8bcaebb80..3b05f90a3e50 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -852,7 +852,8 @@ out_readfail:
if (ret == -EINVAL)
svc_rdma_send_error(rdma_xprt, ctxt, ret);
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
- return ret;
+ svc_xprt_deferred_close(xprt);
+ return -ENOTCONN;
out_backchannel:
svc_rdma_handle_bc_reply(rqstp, ctxt);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 02f583ff9239..002483e60c19 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -139,8 +139,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
- int rc = 0;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret, rc = 0;
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
@@ -154,9 +154,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
break;
}
- if (sk_wait_event(sk, timeo,
- !READ_ONCE(sk->sk_write_pending), &wait))
+ ret = sk_wait_event(sk, timeo,
+ !READ_ONCE(sk->sk_write_pending), &wait);
+ if (ret) {
+ if (ret < 0)
+ rc = ret;
break;
+ }
}
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d1fc295b83b5..e9d1e83a859d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1291,6 +1291,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
long timeo;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1302,6 +1303,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
if (sk->sk_err)
return sock_error(sk);
+ if (ret < 0)
+ return ret;
+
if (!skb_queue_empty(&sk->sk_receive_queue)) {
tls_strp_check_rcv(&ctx->strp);
if (tls_strp_msg_ready(ctx))
@@ -1320,10 +1324,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
released = true;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo,
- tls_strp_msg_ready(ctx) ||
- !sk_psock_queue_empty(psock),
- &wait);
+ ret = sk_wait_event(sk, &timeo,
+ tls_strp_msg_ready(ctx) ||
+ !sk_psock_queue_empty(psock),
+ &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1852,6 +1856,7 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
bool nonblock)
{
long timeo;
+ int ret;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1861,14 +1866,16 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
ctx->reader_contended = 1;
add_wait_queue(&ctx->wq, &wait);
- sk_wait_event(sk, &timeo,
- !READ_ONCE(ctx->reader_present), &wait);
+ ret = sk_wait_event(sk, &timeo,
+ !READ_ONCE(ctx->reader_present), &wait);
remove_wait_queue(&ctx->wq, &wait);
if (timeo <= 0)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(timeo);
+ if (ret < 0)
+ return ret;
}
WRITE_ONCE(ctx->reader_present, 1);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e95df847176b..b80bf681327b 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
virtio_device_ready(vdev);
+ return 0;
+}
+
+static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+{
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
@@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
- return 0;
+ /* virtio_transport_send_pkt() can queue packets once
+ * the_virtio_vsock is set, but they won't be processed until
+ * vsock->tx_run is set to true. We queue vsock->send_pkt_work
+ * when initialization finishes to send those packets queued
+ * earlier.
+ * We don't need to queue the other workers (rx, event) because
+ * as long as we don't fill the queues with empty buffers, the
+ * host can't send us any notification.
+ */
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 64e861617110..acec41c1809a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1622,7 +1622,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
list_add_tail(&work->entry, &rdev->wiphy_work_list);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
}
EXPORT_SYMBOL_GPL(wiphy_work_queue);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 3e2c398abddc..55a1d3633853 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
cr.links[link_id].status = data->links[link_id].status;
+ cr.links[link_id].bss = data->links[link_id].bss;
+
WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
(!cr.ap_mld_addr || !cr.links[link_id].bss));
- cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 939deecf0bbe..8210a6090ac1 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!res)
goto drop;
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+ rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
if (data->bss_source == BSS_SOURCE_MBSSID) {
/* this is a nontransmitting bss, we need to add it to
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index f8905400ee07..d2c264030017 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
q->ring_mask = nentries - 1;
size = xskq_get_ring_size(q, umem_queue);
+
+ /* A size that overflows or is close to SIZE_MAX becomes 0 after
+ * PAGE_ALIGN(); checking for SIZE_MAX is enough thanks to the earlier
+ * is_power_of_2() check, and the rest is handled by vmalloc_user().
+ */
+ if (unlikely(size == SIZE_MAX)) {
+ kfree(q);
+ return NULL;
+ }
+
size = PAGE_ALIGN(size);
q->ring = vmalloc_user(size);
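The SIZE_MAX check guards the PAGE_ALIGN() that follows it: aligning a value at SIZE_MAX wraps around to 0, so vmalloc_user() would be asked for a zero-byte ring instead of failing. A tiny user-space demonstration of the wrap, assuming a 4 KiB page size:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = SIZE_MAX;

	/* SIZE_MAX + PAGE_SIZE - 1 wraps modulo 2^64, then masking yields 0 */
	printf("PAGE_ALIGN(SIZE_MAX) = %zu\n", (size_t)PAGE_ALIGN(size));
	return 0;
}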
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index b86474084690..e21cc71095bb 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -380,8 +380,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
skb->dev = dev;
if (err) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_dropped);
return 0;
}
@@ -426,7 +426,6 @@ static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
unsigned int length = skb->len;
struct net_device *tdev;
@@ -473,7 +472,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
tdev = dst->dev;
if (tdev == dev) {
- stats->collisions++;
+ DEV_STATS_INC(dev, collisions);
net_warn_ratelimited("%s: Local routing loop detected!\n",
dev->name);
goto tx_err_dst_release;
@@ -512,13 +511,13 @@ xmit:
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
}
return 0;
tx_err_link_failure:
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_link_failure(skb);
tx_err_dst_release:
dst_release(dst);
@@ -528,7 +527,6 @@ tx_err_dst_release:
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
@@ -545,7 +543,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
if (dst->error) {
dst_release(dst);
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, dst);
@@ -561,7 +559,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
if (IS_ERR(rt)) {
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, &rt->dst);
@@ -580,8 +578,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
tx_err:
- stats->tx_errors++;
- stats->tx_dropped++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d6b405782b63..d24b4d4f620e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -851,7 +851,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
struct hlist_node *newpos = NULL;
bool matches_s, matches_d;
- if (!policy->bydst_reinsert)
+ if (policy->walk.dead || !policy->bydst_reinsert)
continue;
WARN_ON_ONCE(policy->family != family);
@@ -1256,8 +1256,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
+ if (policy->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(policy->index);
- if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
+ if (dir >= XFRM_POLICY_MAX)
continue;
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
@@ -1372,8 +1375,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
* of an absolute inpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
- static u32 idx_generator;
-
for (;;) {
struct hlist_head *list;
struct xfrm_policy *p;
@@ -1381,8 +1382,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
int found;
if (!index) {
- idx = (idx_generator | dir);
- idx_generator += 8;
+ idx = (net->xfrm.idx_generator | dir);
+ net->xfrm.idx_generator += 8;
} else {
idx = index;
index = 0;
@@ -1823,9 +1824,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->type != type)
continue;
@@ -1862,9 +1865,11 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
@@ -3215,7 +3220,7 @@ no_transform:
}
for (i = 0; i < num_pols; i++)
- pols[i]->curlft.use_time = ktime_get_real_seconds();
+ WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
if (num_xfrms < 0) {
/* Prohibit the flow */