Diffstat (limited to 'net')
-rw-r--r--  net/802/garp.c | 2
-rw-r--r--  net/802/mrp.c | 4
-rw-r--r--  net/9p/client.c | 33
-rw-r--r--  net/9p/trans_fd.c | 14
-rw-r--r--  net/9p/trans_rdma.c | 5
-rw-r--r--  net/9p/trans_virtio.c | 10
-rw-r--r--  net/9p/trans_xen.c | 8
-rw-r--r--  net/atm/atm_sysfs.c | 4
-rw-r--r--  net/bpf/bpf_dummy_struct_ops.c | 3
-rw-r--r--  net/bridge/br_if.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 8
-rw-r--r--  net/bridge/br_multicast_eht.c | 4
-rw-r--r--  net/ceph/messenger.c | 1
-rw-r--r--  net/core/devlink.c | 5
-rw-r--r--  net/core/gen_estimator.c | 2
-rw-r--r--  net/core/net-sysfs.c | 20
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/core/sock.c | 1
-rw-r--r--  net/core/stream.c | 6
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/tcp_plb.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 2
-rw-r--r--  net/mctp/device.c | 14
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 6
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 4
-rw-r--r--  net/netfilter/xt_LED.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 25
-rw-r--r--  net/rfkill/core.c | 2
-rw-r--r--  net/rxrpc/ar-internal.h | 8
-rw-r--r--  net/rxrpc/call_accept.c | 18
-rw-r--r--  net/rxrpc/call_object.c | 1
-rw-r--r--  net/rxrpc/conn_client.c | 2
-rw-r--r--  net/rxrpc/io_thread.c | 10
-rw-r--r--  net/rxrpc/local_object.c | 5
-rw-r--r--  net/rxrpc/peer_event.c | 10
-rw-r--r--  net/rxrpc/peer_object.c | 23
-rw-r--r--  net/rxrpc/rxperf.c | 2
-rw-r--r--  net/rxrpc/security.c | 6
-rw-r--r--  net/rxrpc/sendmsg.c | 2
-rw-r--r--  net/sched/cls_flow.c | 2
-rw-r--r--  net/sched/ematch.c | 2
-rw-r--r--  net/sunrpc/svc.c | 2
-rw-r--r--  net/sunrpc/sysfs.c | 8
-rw-r--r--  net/sunrpc/xprtsock.c | 3
-rw-r--r--  net/tipc/discover.c | 2
-rw-r--r--  net/tipc/monitor.c | 2
-rw-r--r--  net/unix/af_unix.c | 11
-rw-r--r--  net/wireless/sysfs.c | 2
-rw-r--r--  net/xfrm/espintcp.c | 1
54 files changed, 180 insertions(+), 149 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 77aac2763835..ab24b21fbb49 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -618,7 +618,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
- del_timer_sync(&app->join_timer);
+ timer_shutdown_sync(&app->join_timer);
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 66fcbf23b486..eafc21ecc287 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -911,8 +911,8 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
/* Delete timer and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
- del_timer_sync(&app->join_timer);
- del_timer_sync(&app->periodic_timer);
+ timer_shutdown_sync(&app->join_timer);
+ timer_shutdown_sync(&app->periodic_timer);
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
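The garp.c and mrp.c hunks above are the first of many del_timer_sync() -> timer_shutdown_sync() conversions in this diff: once the owning object is being torn down, the timer must not only be stopped but also prevented from ever being re-armed. A minimal sketch of that teardown pattern, using a hypothetical foo object rather than the real GARP/MRP applicants:

#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/timer.h>

/* Hypothetical object owning a timer; not taken from the patch. */
struct foo {
        struct timer_list timer;
        struct rcu_head rcu;
};

static void foo_timer_fn(struct timer_list *t)
{
        struct foo *f = from_timer(f, t, timer);

        /* periodic work would go here; the handler re-arms itself */
        mod_timer(&f->timer, jiffies + HZ);
}

static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                timer_setup(&f->timer, foo_timer_fn, 0);
        return f;
}

static void foo_destroy(struct foo *f)
{
        /*
         * Like del_timer_sync(), this waits for a running handler to
         * finish, but it also marks the timer as shut down so a racing
         * mod_timer()/add_timer() can no longer re-arm it after this
         * point and fire into freed memory.
         */
        timer_shutdown_sync(&f->timer);
        kfree_rcu(f, rcu);
}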
diff --git a/net/9p/client.c b/net/9p/client.c
index 0638b12055ba..622ec6a586ee 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -297,6 +297,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
p9pdu_reset(&req->rc);
req->t_err = 0;
req->status = REQ_STATUS_ALLOC;
+ /* refcount needs to be set to 0 before inserting into the idr
+ * so p9_tag_lookup does not accept a request that is not fully
+ * initialized. refcount_set to 2 below will mark request ready.
+ */
+ refcount_set(&req->refcount, 0);
init_waitqueue_head(&req->wq);
INIT_LIST_HEAD(&req->req_list);
@@ -438,7 +443,7 @@ void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
* the status change is visible to another thread
*/
smp_wmb();
- req->status = status;
+ WRITE_ONCE(req->status, status);
wake_up(&req->wq);
p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
@@ -514,10 +519,9 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
int ecode;
err = p9_parse_header(&req->rc, NULL, &type, NULL, 0);
- if (req->rc.size >= c->msize) {
- p9_debug(P9_DEBUG_ERROR,
- "requested packet size too big: %d\n",
- req->rc.size);
+ if (req->rc.size > req->rc.capacity && !req->rc.zc) {
+ pr_err("requested packet size too big: %d does not fit %zu (type=%d)\n",
+ req->rc.size, req->rc.capacity, req->rc.id);
return -EIO;
}
/* dump the response from server
@@ -600,7 +604,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
/* if we haven't received a response for oldreq,
* remove it from the list
*/
- if (oldreq->status == REQ_STATUS_SENT) {
+ if (READ_ONCE(oldreq->status) == REQ_STATUS_SENT) {
if (c->trans_mod->cancelled)
c->trans_mod->cancelled(c, oldreq);
}
@@ -680,6 +684,9 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if (IS_ERR(req))
return req;
+ req->tc.zc = false;
+ req->rc.zc = false;
+
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -697,7 +704,8 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
}
again:
/* Wait for the response */
- err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(req->wq,
+ READ_ONCE(req->status) >= REQ_STATUS_RCVD);
/* Make sure our req is coherent with regard to updates in other
* threads - echoes to wmb() in the callback
@@ -711,7 +719,7 @@ again:
goto again;
}
- if (req->status == REQ_STATUS_ERROR) {
+ if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
@@ -724,7 +732,7 @@ again:
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
- if (req->status == REQ_STATUS_RCVD)
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
@@ -778,6 +786,9 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
if (IS_ERR(req))
return req;
+ req->tc.zc = true;
+ req->rc.zc = true;
+
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -793,7 +804,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
if (err != -ERESTARTSYS)
goto recalc_sigpending;
}
- if (req->status == REQ_STATUS_ERROR) {
+ if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
@@ -806,7 +817,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
- if (req->status == REQ_STATUS_RCVD)
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
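The READ_ONCE()/WRITE_ONCE() changes running through the 9p client and transports don't add new ordering; they mark every lockless access of req->status so the compiler cannot tear, fuse or re-read it, while the existing smp_wmb()/smp_rmb() pair still orders the status flip against the payload. A rough sketch of that waiter/completer pairing, with hypothetical demo_* names only loosely mirroring struct p9_req_t:

#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/wait.h>

enum { REQ_STATUS_ALLOC, REQ_STATUS_SENT, REQ_STATUS_RCVD, REQ_STATUS_ERROR };

/* Hypothetical request; status and err are accessed locklessly. */
struct demo_req {
        int status;
        int err;
        wait_queue_head_t wq;
};

static void demo_init(struct demo_req *req)
{
        req->status = REQ_STATUS_ALLOC;
        req->err = 0;
        init_waitqueue_head(&req->wq);
}

/* Completion side, e.g. a transport callback. */
static void demo_complete(struct demo_req *req, int status, int err)
{
        req->err = err;
        /*
         * Order the err/payload writes before the status flip; the
         * waiter pairs this with smp_rmb() after seeing the new status.
         */
        smp_wmb();
        WRITE_ONCE(req->status, status);
        wake_up(&req->wq);
}

/* Waiting side, e.g. the thread that issued the RPC. */
static int demo_wait(struct demo_req *req)
{
        int err;

        err = wait_event_killable(req->wq,
                                  READ_ONCE(req->status) >= REQ_STATUS_RCVD);
        if (err)                /* -ERESTARTSYS on a fatal signal */
                return err;

        smp_rmb();              /* pairs with smp_wmb() in demo_complete() */
        if (READ_ONCE(req->status) == REQ_STATUS_ERROR)
                return req->err;
        return 0;
}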
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 07db2f436d44..00b684616e8d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -20,7 +20,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
@@ -202,11 +201,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
spin_unlock(&m->req_lock);
@@ -467,7 +466,7 @@ static void p9_write_work(struct work_struct *work)
req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req_list);
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
list_move_tail(&req->req_list, &m->req_list);
@@ -676,7 +675,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
return m->err;
spin_lock(&m->req_lock);
- req->status = REQ_STATUS_UNSENT;
+ WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
@@ -703,7 +702,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
if (req->status == REQ_STATUS_UNSENT) {
list_del(&req->req_list);
- req->status = REQ_STATUS_FLSHD;
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
p9_req_put(client, req);
ret = 0;
}
@@ -732,7 +731,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
* remove it from the list.
*/
list_del(&req->req_list);
- req->status = REQ_STATUS_FLSHD;
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
spin_unlock(&m->req_lock);
p9_req_put(client, req);
@@ -868,6 +867,7 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
}
csocket->sk->sk_allocation = GFP_NOIO;
+ csocket->sk->sk_use_task_frag = false;
file = sock_alloc_file(csocket, 0, NULL);
if (IS_ERR(file)) {
pr_err("%s (%d): failed to map fd\n",
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 6ff706760676..83f9100d46bf 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -21,7 +21,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
@@ -507,7 +506,7 @@ dont_need_post_recv:
* because doing if after could erase the REQ_STATUS_RCVD
* status in case of a very fast reply.
*/
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
err = ib_post_send(rdma->qp, &wr, NULL);
if (err)
goto send_error;
@@ -517,7 +516,7 @@ dont_need_post_recv:
/* Handle errors that happened during or while preparing the send: */
send_error:
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
kfree(c);
p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e757f0601304..3c27ffb781e3 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -22,7 +22,6 @@
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
@@ -263,7 +262,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry:
spin_lock_irqsave(&chan->lock, flags);
@@ -469,7 +468,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
inlen = n;
}
}
- req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry_pinned:
spin_lock_irqsave(&chan->lock, flags);
@@ -532,9 +531,10 @@ req_retry_pinned:
spin_unlock_irqrestore(&chan->lock, flags);
kicked = 1;
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
- err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(req->wq,
+ READ_ONCE(req->status) >= REQ_STATUS_RCVD);
// RERROR needs reply (== error string) in static data
- if (req->status == REQ_STATUS_RCVD &&
+ if (READ_ONCE(req->status) == REQ_STATUS_RCVD &&
unlikely(req->rc.sdata[4] == P9_RERROR))
handle_rerror(req, in_hdr_len, offs, in_pages);
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index aaa5fd364691..9630b1275557 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -157,7 +157,7 @@ again:
&masked_prod, masked_cons,
XEN_9PFS_RING_SIZE(ring));
- p9_req->status = REQ_STATUS_SENT;
+ WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
virt_wmb(); /* write ring before updating pointer */
prod += size;
ring->intf->out_prod = prod;
@@ -212,11 +212,13 @@ static void p9_xen_response(struct work_struct *work)
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
- req->status = REQ_STATUS_ERROR;
+ WRITE_ONCE(req->status, REQ_STATUS_ERROR);
goto recv_error;
}
- memcpy(&req->rc, &h, sizeof(h));
+ req->rc.size = h.size;
+ req->rc.id = h.id;
+ req->rc.tag = h.tag;
req->rc.offset = 0;
masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 0fdbdfd19474..466353b3dde4 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -108,9 +108,9 @@ static struct device_attribute *atm_attrs[] = {
};
-static int atm_uevent(struct device *cdev, struct kobj_uevent_env *env)
+static int atm_uevent(const struct device *cdev, struct kobj_uevent_env *env)
{
- struct atm_dev *adev;
+ const struct atm_dev *adev;
if (!cdev)
return -ENODEV;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 2d434c1f4617..1ac4467928a9 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -124,8 +124,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
if (err < 0)
goto out;
- set_memory_ro((long)image, 1);
- set_memory_x((long)image, 1);
+ set_memory_rox((long)image, 1);
prog_ret = dummy_ops_call_op(image, args);
err = dummy_ops_copy_args(args);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 228fd5b20f10..ad13b48e3e08 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -262,7 +262,7 @@ static void release_nbp(struct kobject *kobj)
kfree(p);
}
-static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
struct net_bridge_port *p = kobj_to_brport(kobj);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 48170bd3785e..dea1ee1bd095 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -606,7 +606,7 @@ static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
WARN_ON(!hlist_unhashed(&mp->mdb_node));
WARN_ON(mp->ports);
- del_timer_sync(&mp->timer);
+ timer_shutdown_sync(&mp->timer);
kfree_rcu(mp, rcu);
}
@@ -647,7 +647,7 @@ static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
src = container_of(gc, struct net_bridge_group_src, mcast_gc);
WARN_ON(!hlist_unhashed(&src->node));
- del_timer_sync(&src->timer);
+ timer_shutdown_sync(&src->timer);
kfree_rcu(src, rcu);
}
@@ -676,8 +676,8 @@ static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
WARN_ON(!hlist_unhashed(&pg->mglist));
WARN_ON(!hlist_empty(&pg->src_list));
- del_timer_sync(&pg->rexmit_timer);
- del_timer_sync(&pg->timer);
+ timer_shutdown_sync(&pg->rexmit_timer);
+ timer_shutdown_sync(&pg->timer);
kfree_rcu(pg, rcu);
}
diff --git a/net/bridge/br_multicast_eht.c b/net/bridge/br_multicast_eht.c
index f91c071d1608..c126aa4e7551 100644
--- a/net/bridge/br_multicast_eht.c
+++ b/net/bridge/br_multicast_eht.c
@@ -142,7 +142,7 @@ static void br_multicast_destroy_eht_set_entry(struct net_bridge_mcast_gc *gc)
set_h = container_of(gc, struct net_bridge_group_eht_set_entry, mcast_gc);
WARN_ON(!RB_EMPTY_NODE(&set_h->rb_node));
- del_timer_sync(&set_h->timer);
+ timer_shutdown_sync(&set_h->timer);
kfree(set_h);
}
@@ -154,7 +154,7 @@ static void br_multicast_destroy_eht_set(struct net_bridge_mcast_gc *gc)
WARN_ON(!RB_EMPTY_NODE(&eht_set->rb_node));
WARN_ON(!RB_EMPTY_ROOT(&eht_set->entry_tree));
- del_timer_sync(&eht_set->timer);
+ timer_shutdown_sync(&eht_set->timer);
kfree(eht_set);
}
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dfa237fbd5a3..1d06e114ba3f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -446,6 +446,7 @@ int ceph_tcp_connect(struct ceph_connection *con)
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
+ sock->sk->sk_use_task_frag = false;
#ifdef CONFIG_LOCKDEP
lockdep_set_class(&sock->sk->sk_lock, &socket_class);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 6004bd0ccee4..032d6d0a5ce6 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1648,10 +1648,13 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
continue;
}
+ devl_lock(devlink);
err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ devl_unlock(devlink);
devlink_put(devlink);
+
if (err)
goto out;
idx++;
@@ -11925,8 +11928,10 @@ void devl_region_destroy(struct devlink_region *region)
devl_assert_locked(devlink);
/* Free all snapshots of region */
+ mutex_lock(&region->snapshot_lock);
list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
list_del(&region->list);
mutex_destroy(&region->snapshot_lock);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 4fcbdd71c59f..fae9c4694186 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -208,7 +208,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
if (est) {
- del_timer_sync(&est->timer);
+ timer_shutdown_sync(&est->timer);
kfree_rcu(est, rcu);
}
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 679b84cc8794..ca55dd747d6c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1020,7 +1020,7 @@ static void rx_queue_release(struct kobject *kobj)
netdev_put(queue->dev, &queue->dev_tracker);
}
-static const void *rx_queue_namespace(struct kobject *kobj)
+static const void *rx_queue_namespace(const struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
struct device *dev = &queue->dev->dev;
@@ -1032,7 +1032,7 @@ static const void *rx_queue_namespace(struct kobject *kobj)
return ns;
}
-static void rx_queue_get_ownership(struct kobject *kobj,
+static void rx_queue_get_ownership(const struct kobject *kobj,
kuid_t *uid, kgid_t *gid)
{
const struct net *net = rx_queue_namespace(kobj);
@@ -1623,7 +1623,7 @@ static void netdev_queue_release(struct kobject *kobj)
netdev_put(queue->dev, &queue->dev_tracker);
}
-static const void *netdev_queue_namespace(struct kobject *kobj)
+static const void *netdev_queue_namespace(const struct kobject *kobj)
{
struct netdev_queue *queue = to_netdev_queue(kobj);
struct device *dev = &queue->dev->dev;
@@ -1635,7 +1635,7 @@ static const void *netdev_queue_namespace(struct kobject *kobj)
return ns;
}
-static void netdev_queue_get_ownership(struct kobject *kobj,
+static void netdev_queue_get_ownership(const struct kobject *kobj,
kuid_t *uid, kgid_t *gid)
{
const struct net *net = netdev_queue_namespace(kobj);
@@ -1873,9 +1873,9 @@ const struct kobj_ns_type_operations net_ns_type_operations = {
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
-static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
+static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
int retval;
/* pass interface to uevent. */
@@ -1910,16 +1910,16 @@ static void netdev_release(struct device *d)
netdev_freemem(dev);
}
-static const void *net_namespace(struct device *d)
+static const void *net_namespace(const struct device *d)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
return dev_net(dev);
}
-static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
+static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
{
- struct net_device *dev = to_net_dev(d);
+ const struct net_device *dev = to_net_dev(d);
const struct net *net = dev_net(dev);
net_ns_get_ownership(net, uid, gid);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3cbba7099c0f..4a0eb5593275 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2482,6 +2482,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
insp = list;
} else {
/* Eaten partially. */
+ if (skb_is_gso(skb) && !list->head_frag &&
+ skb_headlen(list))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
diff --git a/net/core/sock.c b/net/core/sock.c
index d2587d8712db..f954d5893e79 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3390,6 +3390,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
sk->sk_state = TCP_CLOSE;
+ sk->sk_use_task_frag = true;
sk_set_socket(sk, sock);
sock_set_flag(sk, SOCK_ZAPPED);
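sock_init_data() above now defaults sk->sk_use_task_frag to true, and several hunks in this diff (9p, ceph, sunrpc, espintcp) flip it to false on kernel-created sockets whose traffic can be generated outside the context of the owning task (for instance from workqueues or memory reclaim), so they must not borrow current->task_frag. A hedged sketch of how such a kernel user would opt out right after creating its socket; the demo_* name is illustrative, not from the patch:

#include <linux/net.h>
#include <net/sock.h>

/* Sketch only: a kernel service creating its own transport socket. */
static int demo_open_transport(struct net *net, struct socket **sockp)
{
        struct socket *sock;
        int ret;

        ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
                return ret;

        /*
         * Sends on this socket may not come from the task that "owns"
         * it, so it must not use current->task_frag for page-fragment
         * allocation.
         */
        sock->sk->sk_allocation = GFP_NOFS;
        sock->sk->sk_use_task_frag = false;

        *sockp = sock;
        return 0;
}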
diff --git a/net/core/stream.c b/net/core/stream.c
index 5b1fe2b82eac..cd06750dd329 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
/* First the read buffer. */
__skb_queue_purge(&sk->sk_receive_queue);
+ /* Next, the error queue.
+ * We need to use queue lock, because other threads might
+ * add packets to the queue without socket lock being held.
+ */
+ skb_queue_purge(&sk->sk_error_queue);
+
/* Next, the write queue. */
WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
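The sk_stream_kill_queues() hunk purges the error queue with skb_queue_purge() rather than __skb_queue_purge(): the receive queue is private to the caller at this point, but as the new comment notes, the error queue can still be appended to without the socket lock held, so the variant that takes the queue spinlock is the safe one. A small illustrative sketch of the distinction, with made-up queues rather than a real socket:

#include <linux/skbuff.h>

/* Illustrative only; the real code operates on sk->sk_error_queue. */
static void demo_drop_queues(struct sk_buff_head *private_q,
                             struct sk_buff_head *shared_q)
{
        /*
         * __skb_queue_purge(): no locking, only valid when nobody else
         * can touch the list concurrently (e.g. the queue is
         * exclusively ours during teardown).
         */
        __skb_queue_purge(private_q);

        /*
         * skb_queue_purge(): takes shared_q->lock around the unlinks,
         * so concurrent producers (e.g. an error-skb being queued from
         * softirq context) cannot corrupt the list while we empty it.
         */
        skb_queue_purge(shared_q);
}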
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b58df3c1bf7d..eec1f6df80d8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -412,7 +412,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
- del_timer_sync(&mrt->ipmr_expire_timer);
+ timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
diff --git a/net/ipv4/tcp_plb.c b/net/ipv4/tcp_plb.c
index bb1a08fda113..4bcf7eff95e3 100644
--- a/net/ipv4/tcp_plb.c
+++ b/net/ipv4/tcp_plb.c
@@ -97,7 +97,7 @@ void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb)
return;
pause = READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
- pause += prandom_u32_max(pause);
+ pause += get_random_u32_below(pause);
plb->pause_until = tcp_jiffies32 + pause;
/* Reset PLB state upon RTO, since an RTO causes a sk_rethink_txhash() call
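The tcp_plb.c hunk is part of the tree-wide rename of prandom_u32_max() to get_random_u32_below(); both return a uniform value in [0, ceil). The PLB code uses it to add jitter to the post-RTO pause, roughly like this illustrative helper:

#include <linux/random.h>

/* Pause for between base and 2*base - 1 jiffies (base assumed non-zero). */
static u32 demo_randomized_pause(u32 base)
{
        return base + get_random_u32_below(base);
}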
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 23e766597f36..51cf37abd142 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -392,7 +392,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr_table *mrt)
{
- del_timer_sync(&mrt->ipmr_expire_timer);
+ timer_shutdown_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 69d5e1ec6ede..3b81e6df3f34 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -512,7 +512,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
mesh_gate_del(tbl, mpath);
spin_unlock_bh(&mpath->state_lock);
- del_timer_sync(&mpath->timer);
+ timer_shutdown_sync(&mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
mesh_path_flush_pending(mpath);
diff --git a/net/mctp/device.c b/net/mctp/device.c
index 99a3bda8852f..acb97b257428 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -429,12 +429,6 @@ static void mctp_unregister(struct net_device *dev)
struct mctp_dev *mdev;
mdev = mctp_dev_get_rtnl(dev);
- if (mdev && !mctp_known(dev)) {
- // Sanity check, should match what was set in mctp_register
- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
- __func__, dev->type);
- return;
- }
if (!mdev)
return;
@@ -451,14 +445,8 @@ static int mctp_register(struct net_device *dev)
struct mctp_dev *mdev;
/* Already registered? */
- mdev = rtnl_dereference(dev->mctp_ptr);
-
- if (mdev) {
- if (!mctp_known(dev))
- netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
- __func__, dev->type);
+ if (rtnl_dereference(dev->mctp_ptr))
return 0;
- }
/* only register specific types */
if (!mctp_known(dev))
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a67f7966574..e162636525cf 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -427,7 +427,7 @@ list_set_destroy(struct ip_set *set)
struct set_elem *e, *n;
if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
+ timer_shutdown_sync(&map->gc);
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c9f598505642..2a5ed71c82c3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2841,6 +2841,11 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
}
out_unlock:
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 7ac7473e3804..1b87214d385e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -384,7 +384,7 @@ static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
struct ip_vs_lblc_table *tbl = svc->sched_data;
/* remove periodic timer */
- del_timer_sync(&tbl->periodic_timer);
+ timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblc_flush(svc);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 77c323c36a88..ad8f5fea6d3a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -547,7 +547,7 @@ static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
struct ip_vs_lblcr_table *tbl = svc->sched_data;
/* remove periodic timer */
- del_timer_sync(&tbl->periodic_timer);
+ timer_shutdown_sync(&tbl->periodic_timer);
/* got to clean up table entries here */
ip_vs_lblcr_flush(svc);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 0fdcdb2c9ae4..4d9b99abe37d 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -383,12 +383,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
const __be32 *addr, const __be32 *mask)
{
struct flow_action_entry *entry;
- int i, j;
+ int i;
- for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
entry = flow_action_entry_next(flow_rule);
flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
- offset + i, &addr[j], mask);
+ offset + i * sizeof(u32), &addr[i], mask);
}
}
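The nf_flow_table_offload.c hunk fixes the IPv6 mangle loop's offset arithmetic: the index now counts 32-bit words and the byte offset is derived explicitly as i * sizeof(u32), instead of mixing a byte-stepping index with a separate word index. Standalone, the intended walk over an IPv6 address looks roughly like this (the demo_* name is illustrative):

#include <asm/byteorder.h>
#include <linux/in6.h>
#include <linux/printk.h>

/* Visit each 32-bit word of an IPv6 address together with its byte offset. */
static void demo_walk_ipv6_words(const struct in6_addr *addr, u32 base_offset)
{
        unsigned int i;

        for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
                u32 byte_offset = base_offset + i * sizeof(u32);

                pr_debug("word %u at byte offset %u: %08x\n",
                         i, byte_offset, be32_to_cpu(addr->s6_addr32[i]));
        }
}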
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 0f8bb0bf558f..8d36303f3935 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -413,7 +413,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
pr_debug("deleting timer %s\n", info->label);
list_del(&info->timer->entry);
- del_timer_sync(&info->timer->timer);
+ timer_shutdown_sync(&info->timer->timer);
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
kfree(info->timer->attr.attr.name);
@@ -441,7 +441,7 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
alarm_cancel(&info->timer->alarm);
} else {
- del_timer_sync(&info->timer->timer);
+ timer_shutdown_sync(&info->timer->timer);
}
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 0371c387b0d1..66b0f941d8fb 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -166,7 +166,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
list_del(&ledinternal->list);
- del_timer_sync(&ledinternal->timer);
+ timer_shutdown_sync(&ledinternal->timer);
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 932bcf766d63..9ca721c9fa71 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -973,6 +973,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
+ struct sw_flow_key *key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
@@ -1000,24 +1001,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &new_flow->key, false, &mask);
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ error = -ENOMEM;
+ goto err_kfree_key;
+ }
+
+ ovs_match_init(&match, key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
+ ovs_flow_mask_key(&new_flow->key, key, true, &mask);
+
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
- &new_flow->key, log);
+ key, log);
if (error)
goto err_kfree_flow;
- /* unmasked key is needed to match when ufid is not used. */
- if (ovs_identifier_is_key(&new_flow->id))
- match.key = new_flow->id.unmasked_key;
-
- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
-
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
@@ -1044,7 +1047,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
+ flow = ovs_flow_tbl_lookup(&dp->table, key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -1114,6 +1117,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
+
+ kfree(key);
return 0;
err_unlock_ovs:
@@ -1123,6 +1128,8 @@ err_kfree_acts:
ovs_nla_free_flow_actions(acts);
err_kfree_flow:
ovs_flow_free(new_flow, false);
+err_kfree_key:
+ kfree(key);
error:
return error;
}
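In ovs_flow_cmd_new() above, the unmasked key now lives in a separately kzalloc()ed buffer: the flow-table lookup and ufid extraction use the unmasked key, only the masked copy is stored in new_flow->key, and the buffer is freed on the success path and on every error path. The allocate/unwind shape, stripped of the OVS specifics (all demo_* helpers are stand-ins, not real signatures):

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_key { u32 words[128]; };    /* stand-in for struct sw_flow_key */

static int demo_extract_key(struct demo_key *key) { return 0; }
static int demo_lookup_and_insert(const struct demo_key *key) { return 0; }

static int demo_flow_new(void)
{
        struct demo_key *key;
        int error;

        /* Too large to sit on the stack; only needed for this call. */
        key = kzalloc(sizeof(*key), GFP_KERNEL);
        if (!key)
                return -ENOMEM;

        error = demo_extract_key(key);
        if (error)
                goto err_free_key;

        error = demo_lookup_and_insert(key);
        if (error)
                goto err_free_key;

        kfree(key);             /* the success path frees the scratch key too */
        return 0;

err_free_key:
        kfree(key);
        return error;
}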
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index dac4fdc7488a..b390ff245d5e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -832,7 +832,7 @@ static void rfkill_release(struct device *dev)
kfree(rfkill);
}
-static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long flags;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index e7dccab7b741..18092526d3c8 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -287,6 +287,7 @@ struct rxrpc_local {
struct hlist_node link;
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
+ struct completion io_thread_ready; /* Indication that the I/O thread started */
struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
struct sk_buff_head rx_queue; /* Received packets */
@@ -811,9 +812,9 @@ extern struct workqueue_struct *rxrpc_workqueue;
*/
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
-bool rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
- struct rxrpc_connection *, struct sockaddr_rxrpc *,
- struct sk_buff *);
+int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
+ struct rxrpc_connection *, struct sockaddr_rxrpc *,
+ struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
@@ -1072,7 +1073,6 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
-void rxrpc_put_peer_locked(struct rxrpc_peer *, enum rxrpc_peer_trace);
/*
* proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index d1850863507f..c02401656fa9 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -326,11 +326,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
* If we want to report an error, we mark the skb with the packet type and
* abort code and return false.
*/
-bool rxrpc_new_incoming_call(struct rxrpc_local *local,
- struct rxrpc_peer *peer,
- struct rxrpc_connection *conn,
- struct sockaddr_rxrpc *peer_srx,
- struct sk_buff *skb)
+int rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn,
+ struct sockaddr_rxrpc *peer_srx,
+ struct sk_buff *skb)
{
const struct rxrpc_security *sec = NULL;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -342,7 +342,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
/* Don't set up a call for anything other than the first DATA packet. */
if (sp->hdr.seq != 1 ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
- return true; /* Just discard */
+ return 0; /* Just discard */
rcu_read_lock();
@@ -413,7 +413,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
_leave(" = %p{%d}", call, call->debug_id);
rxrpc_input_call_event(call, skb);
rxrpc_put_call(call, rxrpc_call_put_input);
- return true;
+ return 0;
unsupported_service:
trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -425,10 +425,10 @@ no_call:
reject:
rcu_read_unlock();
_leave(" = f [%u]", skb->mark);
- return false;
+ return -EPROTO;
discard:
rcu_read_unlock();
- return true;
+ return 0;
}
/*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index be5eb8cdf549..89dcf60b1158 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -217,6 +217,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
call->tx_total_len = p->tx_total_len;
call->key = key_get(cp->key);
call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
+ call->security_level = cp->security_level;
if (p->kernel)
__set_bit(RXRPC_CALL_KERNEL, &call->flags);
if (cp->upgrade)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index a08e33c9e54b..87efa0373aed 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -551,8 +551,6 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
call->cid = conn->proto.cid | channel;
call->call_id = call_id;
- call->security = conn->security;
- call->security_ix = conn->security_ix;
call->dest_srx.srx_service = conn->service_id;
trace_rxrpc_connect_call(call);
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index d83ae3193032..1ad067d66fb6 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -292,7 +292,7 @@ protocol_error:
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
rxrpc_reject_packet(local, skb);
- return ret;
+ return 0;
}
/*
@@ -384,7 +384,7 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
if (rxrpc_to_client(sp))
goto bad_message;
if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
- peer_srx, skb))
+ peer_srx, skb) == 0)
return 0;
goto reject_packet;
}
@@ -425,6 +425,9 @@ int rxrpc_io_thread(void *data)
struct rxrpc_local *local = data;
struct rxrpc_call *call;
struct sk_buff *skb;
+ bool should_stop;
+
+ complete(&local->io_thread_ready);
skb_queue_head_init(&rx_queue);
@@ -476,13 +479,14 @@ int rxrpc_io_thread(void *data)
}
set_current_state(TASK_INTERRUPTIBLE);
+ should_stop = kthread_should_stop();
if (!skb_queue_empty(&local->rx_queue) ||
!list_empty(&local->call_attend_q)) {
__set_current_state(TASK_RUNNING);
continue;
}
- if (kthread_should_stop())
+ if (should_stop)
break;
schedule();
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 44222923c0d1..270b63d8f37a 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -97,6 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->rxnet = rxnet;
INIT_HLIST_NODE(&local->link);
init_rwsem(&local->defrag_sem);
+ init_completion(&local->io_thread_ready);
skb_queue_head_init(&local->rx_queue);
INIT_LIST_HEAD(&local->call_attend_q);
local->client_bundles = RB_ROOT;
@@ -189,6 +190,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
goto error_sock;
}
+ wait_for_completion(&local->io_thread_ready);
local->io_thread = io_thread;
_leave(" = 0");
return 0;
@@ -357,10 +359,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
*/
void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
- unsigned int debug_id = local->debug_id;
+ unsigned int debug_id;
int r, u;
if (local) {
+ debug_id = local->debug_id;
r = refcount_read(&local->ref);
u = atomic_dec_return(&local->active_users);
trace_rxrpc_local(debug_id, why, r, u);
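The io_thread.c and local_object.c hunks above add a completion so rxrpc_open_socket() does not return until the I/O thread has actually started, and local->io_thread is only published afterwards. The generic shape of that startup handshake, sketched with hypothetical demo_* names:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical endpoint; only the fields this sketch needs. */
struct demo_local {
        struct completion io_thread_ready;
        struct task_struct *io_thread;
};

static int demo_io_thread(void *data)
{
        struct demo_local *local = data;

        /* Tell the creator we are running before entering the main loop. */
        complete(&local->io_thread_ready);

        for (;;) {
                /* ... drain the rx queue, attend to calls ... */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop())
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int demo_start_io_thread(struct demo_local *local)
{
        struct task_struct *t;

        init_completion(&local->io_thread_ready);

        t = kthread_run(demo_io_thread, local, "demo-io");
        if (IS_ERR(t))
                return PTR_ERR(t);

        /* Don't return until the thread is known to be up and consuming work. */
        wait_for_completion(&local->io_thread_ready);
        local->io_thread = t;
        return 0;
}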
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 6685bf917aa6..552ba84a255c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -235,6 +235,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
struct rxrpc_peer *peer;
const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
time64_t keepalive_at;
+ bool use;
int slot;
spin_lock(&rxnet->peer_hash_lock);
@@ -247,9 +248,10 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
continue;
- if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
- spin_unlock(&rxnet->peer_hash_lock);
+ use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
+ spin_unlock(&rxnet->peer_hash_lock);
+ if (use) {
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
@@ -270,9 +272,11 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_lock(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
+ spin_unlock(&rxnet->peer_hash_lock);
rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
}
- rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
+ rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
+ spin_lock(&rxnet->peer_hash_lock);
}
spin_unlock(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 608946dcc505..4eecea2be307 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -226,7 +226,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
rxrpc_peer_init_rtt(peer);
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
- trace_rxrpc_peer(peer->debug_id, why, 1);
+ trace_rxrpc_peer(peer->debug_id, 1, why);
}
_leave(" = %p", peer);
@@ -382,7 +382,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace
int r;
__refcount_inc(&peer->ref, &r);
- trace_rxrpc_peer(peer->debug_id, why, r + 1);
+ trace_rxrpc_peer(peer->debug_id, r + 1, why);
return peer;
}
@@ -439,25 +439,6 @@ void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
}
/*
- * Drop a ref on a peer record where the caller already holds the
- * peer_hash_lock.
- */
-void rxrpc_put_peer_locked(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
-{
- unsigned int debug_id = peer->debug_id;
- bool dead;
- int r;
-
- dead = __refcount_dec_and_test(&peer->ref, &r);
- trace_rxrpc_peer(debug_id, r - 1, why);
- if (dead) {
- hash_del_rcu(&peer->hash_link);
- list_del_init(&peer->keepalive_link);
- rxrpc_free_peer(peer);
- }
-}
-
-/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 66f5eea291ff..d33a109e846c 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -275,7 +275,7 @@ static void rxperf_deliver_to_call(struct work_struct *work)
struct rxperf_call *call = container_of(work, struct rxperf_call, work);
enum rxperf_call_state state;
u32 abort_code, remote_abort = 0;
- int ret;
+ int ret = 0;
if (call->state == RXPERF_CALL_COMPLETE)
return;
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 209f2c25a0da..ab968f65a490 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -67,13 +67,13 @@ const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
*/
int rxrpc_init_client_call_security(struct rxrpc_call *call)
{
- const struct rxrpc_security *sec;
+ const struct rxrpc_security *sec = &rxrpc_no_security;
struct rxrpc_key_token *token;
struct key *key = call->key;
int ret;
if (!key)
- return 0;
+ goto found;
ret = key_validate(key);
if (ret < 0)
@@ -88,7 +88,7 @@ int rxrpc_init_client_call_security(struct rxrpc_call *call)
found:
call->security = sec;
- _leave(" = 0");
+ call->security_ix = sec->security_index;
return 0;
}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 9fa7e37f7155..cde1e65f16b4 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -625,7 +625,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
- goto error_put;
+ goto out_put_unlock;
call->tx_total_len = p.call.tx_total_len;
}
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 535668e1f748..6ab317b48d6c 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -369,7 +369,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
static void __flow_destroy_filter(struct flow_filter *f)
{
- del_timer_sync(&f->perturb_timer);
+ timer_shutdown_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 4ce681361851..5c1235e6076a 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) {
+ if (em->ops->datalen > 0)
+ goto errout;
if (data_len < sizeof(u32))
goto errout;
em->data = *(u32 *) data;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 8f1b596db33f..85f0c3cfc877 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -567,7 +567,7 @@ svc_destroy(struct kref *ref)
struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
- del_timer_sync(&serv->sv_temptimer);
+ timer_shutdown_sync(&serv->sv_temptimer);
/*
* The last user is gone and thus all sockets have to be destroyed to
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index c1f559892ae8..1e05a2d723f4 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -31,7 +31,7 @@ static void rpc_sysfs_object_release(struct kobject *kobj)
}
static const struct kobj_ns_type_operations *
-rpc_sysfs_object_child_ns_type(struct kobject *kobj)
+rpc_sysfs_object_child_ns_type(const struct kobject *kobj)
{
return &net_ns_type_operations;
}
@@ -381,17 +381,17 @@ static void rpc_sysfs_xprt_release(struct kobject *kobj)
kfree(xprt);
}
-static const void *rpc_sysfs_client_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_client_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_client, kobject)->net;
}
-static const void *rpc_sysfs_xprt_switch_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_xprt_switch_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_xprt_switch, kobject)->net;
}
-static const void *rpc_sysfs_xprt_namespace(struct kobject *kobj)
+static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj)
{
return container_of(kobj, struct rpc_sysfs_xprt,
kobject)->xprt->xprt_net;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c0506d0d7478..aaa5b2741b79 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1882,6 +1882,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_write_space = xs_udp_write_space;
sk->sk_state_change = xs_local_state_change;
sk->sk_error_report = xs_error_report;
+ sk->sk_use_task_frag = false;
xprt_clear_connected(xprt);
@@ -2082,6 +2083,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_data_ready;
sk->sk_write_space = xs_udp_write_space;
+ sk->sk_use_task_frag = false;
xprt_set_connected(xprt);
@@ -2249,6 +2251,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
sk->sk_error_report = xs_error_report;
+ sk->sk_use_task_frag = false;
/* socket options */
sock_reset_flag(sk, SOCK_LINGER);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index e8dcdf267c0c..685389d4b245 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -388,7 +388,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
*/
void tipc_disc_delete(struct tipc_discoverer *d)
{
- del_timer_sync(&d->timer);
+ timer_shutdown_sync(&d->timer);
kfree_skb(d->skb);
kfree(d);
}
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 9618e4429f0f..77a3d016cade 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -700,7 +700,7 @@ void tipc_mon_delete(struct net *net, int bearer_id)
}
mon->self = NULL;
write_unlock_bh(&mon->lock);
- del_timer_sync(&mon->timer);
+ timer_shutdown_sync(&mon->timer);
kfree(self->domain);
kfree(self);
kfree(mon);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ede2b2a140a4..f0c2293f1d3b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1999,13 +1999,20 @@ restart_locked:
unix_state_lock(sk);
err = 0;
- if (unix_peer(sk) == other) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ /* We are here only when racing with unix_release_sock()
+ * is clearing @other. Never change state to TCP_CLOSE
+ * unlike SOCK_DGRAM wants.
+ */
+ unix_state_unlock(sk);
+ err = -EPIPE;
+ } else if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
- sk->sk_state = TCP_CLOSE;
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0c3f05c9be27..cdb638647e0b 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -148,7 +148,7 @@ static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume);
#define WIPHY_PM_OPS NULL
#endif
-static const void *wiphy_namespace(struct device *d)
+static const void *wiphy_namespace(const struct device *d)
{
struct wiphy *wiphy = container_of(d, struct wiphy, dev);
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index d6fece1ed982..74a54295c164 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -489,6 +489,7 @@ static int espintcp_init_sk(struct sock *sk)
/* avoid using task_frag */
sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_use_task_frag = false;
return 0;