Diffstat (limited to 'net')
 net/appletalk/atalk_proc.c               |  2
 net/bluetooth/6lowpan.c                  |  5
 net/bluetooth/hci_core.c                 | 28
 net/bluetooth/hci_event.c                | 11
 net/bpfilter/bpfilter_kern.c             |  1
 net/can/j1939/socket.c                   | 14
 net/can/j1939/transport.c                | 89
 net/compat.c                             |  1
 net/core/filter.c                        | 75
 net/core/skbuff.c                        |  4
 net/core/sock.c                          | 46
 net/ethtool/features.c                   | 19
 net/ipv4/inet_connection_sock.c          | 97
 net/ipv4/inet_hashtables.c               |  1
 net/ipv4/nexthop.c                       |  5
 net/ipv4/sysctl_net_ipv4.c               | 16
 net/ipv4/tcp.c                           | 16
 net/ipv4/tcp_fastopen.c                  | 23
 net/ipv6/ip6_tunnel.c                    | 10
 net/mac80211/sta_info.c                  |  2
 net/netfilter/ipvs/ip_vs_core.c          | 12
 net/netfilter/nft_exthdr.c               |  4
 net/netfilter/nft_meta.c                 |  2
 net/netlink/policy.c                     |  3
 net/nfc/rawsock.c                        |  7
 net/packet/af_packet.c                   |  9
 net/qrtr/qrtr.c                          | 20
 net/sched/act_ct.c                       |  2
 net/sctp/stream.c                        |  6
 net/smc/smc_diag.c                       | 16
 net/socket.c                             |  2
 net/sunrpc/auth_gss/gss_krb5_wrap.c      |  2
 net/sunrpc/auth_gss/svcauth_gss.c        |  1
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  |  2
 net/sunrpc/xprtrdma/svc_rdma_rw.c        | 28
 net/tipc/crypto.c                        |  2
 net/tipc/netlink_compat.c                | 12
 net/tls/tls_device.c                     |  3
 net/vmw_vsock/af_vsock.c                 |  2
 39 files changed, 419 insertions(+), 181 deletions(-)
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 550c6ca007cc..9c1241292d1d 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -229,6 +229,8 @@ int __init atalk_proc_init(void)
sizeof(struct aarp_iter_state), NULL))
goto out;
+ return 0;
+
out:
remove_proc_subtree("atalk", init_net.proc_net);
return -ENOMEM;
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index bb55d92691b0..cff4944d5b66 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -50,6 +50,7 @@ static bool enable_6lowpan;
/* We are listening incoming connections via this channel
*/
static struct l2cap_chan *listen_chan;
+static DEFINE_MUTEX(set_lock);
struct lowpan_peer {
struct list_head list;
@@ -1078,12 +1079,14 @@ static void do_enable_set(struct work_struct *work)
enable_6lowpan = set_enable->flag;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
}
listen_chan = bt_6lowpan_listen();
+ mutex_unlock(&set_lock);
kfree(set_enable);
}
@@ -1135,11 +1138,13 @@ static ssize_t lowpan_control_write(struct file *fp,
if (ret == -EINVAL)
return ret;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
listen_chan = NULL;
}
+ mutex_unlock(&set_lock);
if (conn) {
struct lowpan_peer *peer;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index dbe2d79f233f..41fba93d857a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -606,7 +606,8 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[8] & 0x01)
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
- if (hdev->commands[18] & 0x04)
+ if (hdev->commands[18] & 0x04 &&
+ !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
/* Some older Broadcom based Bluetooth 1.2 controllers do not
@@ -851,7 +852,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
/* Set erroneous data reporting if supported to the wideband speech
* setting value
*/
- if (hdev->commands[18] & 0x08) {
+ if (hdev->commands[18] & 0x08 &&
+ !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
bool enabled = hci_dev_test_flag(hdev,
HCI_WIDEBAND_SPEECH_ENABLED);
@@ -3289,10 +3291,10 @@ static int hci_suspend_wait_event(struct hci_dev *hdev)
WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
if (ret == 0) {
- bt_dev_dbg(hdev, "Timed out waiting for suspend");
+ bt_dev_err(hdev, "Timed out waiting for suspend events");
for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
if (test_bit(i, hdev->suspend_tasks))
- bt_dev_dbg(hdev, "Bit %d is set", i);
+ bt_dev_err(hdev, "Suspend timeout bit: %d", i);
clear_bit(i, hdev->suspend_tasks);
}
@@ -3360,12 +3362,15 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
ret = hci_change_suspend_state(hdev, BT_RUNNING);
}
- /* If suspend failed, restore it to running */
- if (ret && action == PM_SUSPEND_PREPARE)
- hci_change_suspend_state(hdev, BT_RUNNING);
-
done:
- return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
+ /* We always allow suspend even if suspend preparation failed and
+ * attempt to recover in resume.
+ */
+ if (ret)
+ bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+ action, ret);
+
+ return NOTIFY_STOP;
}
/* Alloc HCI device */
@@ -3603,9 +3608,10 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
- hci_dev_do_close(hdev);
-
unregister_pm_notifier(&hdev->suspend_notifier);
+ cancel_work_sync(&hdev->suspend_prepare);
+
+ hci_dev_do_close(hdev);
if (!test_bit(HCI_INIT, &hdev->flags) &&
!hci_dev_test_flag(hdev, HCI_SETUP) &&
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index af9d7f2ff8ba..6c6c9a81bee2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2520,7 +2520,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
- if (!num_rsp)
+ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -4166,6 +4166,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
+ if (skb->len < num_rsp * sizeof(*info) + 1)
+ goto unlock;
+
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@@ -4187,6 +4190,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
+ if (skb->len < num_rsp * sizeof(*info) + 1)
+ goto unlock;
+
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@@ -4207,6 +4213,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
}
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -4382,7 +4389,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
- if (!num_rsp)
+ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index 4494ea6056cd..42b88a92afe9 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -50,6 +50,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.len = optlen;
if (!bpfilter_ops.info.pid)
goto out;
+ pos = 0;
n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
&pos);
if (n != sizeof(req)) {
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index f7587428febd..bf9fd6ee88fe 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
sk->sk_destruct = j1939_sk_sock_destruct;
+ sk->sk_protocol = CAN_J1939;
return 0;
}
@@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
goto out_release_sock;
}
+ if (!ndev->ml_priv) {
+ netdev_warn_once(ndev,
+ "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+ dev_put(ndev);
+ ret = -ENODEV;
+ goto out_release_sock;
+ }
+
priv = j1939_netdev_start(ndev);
dev_put(ndev);
if (IS_ERR(priv)) {
@@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
const struct j1939_sock *jsk, int peer)
{
+ /* There are two holes (2 bytes and 3 bytes) to clear to avoid
+ * leaking kernel information to user space.
+ */
+ memset(addr, 0, J1939_MIN_NAMELEN);
+
addr->can_family = AF_CAN;
addr->can_ifindex = jsk->ifindex;
addr->can_addr.j1939.pgn = jsk->addr.pgn;
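A minimal user-space sketch of the path the added memset() hardens (assumes a J1939-enabled can0 interface; error handling omitted). The kernel copies this sockaddr back to the caller, so any padding holes it does not explicitly write must be zeroed on the kernel side:

#include <linux/can.h>
#include <linux/can/j1939.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	socklen_t alen = sizeof(addr);
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);

	addr.can_ifindex = if_nametoindex("can0");
	addr.can_addr.j1939.name = J1939_NO_NAME;
	addr.can_addr.j1939.pgn = J1939_NO_PGN;
	addr.can_addr.j1939.addr = 0x20;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	/* j1939_sk_sock2sockaddr_can() fills this struct in the kernel;
	 * the memset() keeps its struct holes from leaking to us. */
	getsockname(s, (struct sockaddr *)&addr, &alen);
	return 0;
}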
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 9f99af5b0b11..dbd215cbc53d 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
skb_queue_tail(&session->skb_queue, skb);
}
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct
+sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+ unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
+ struct j1939_sk_buff_cb *do_skcb;
struct sk_buff *skb = NULL;
struct sk_buff *do_skb;
- struct j1939_sk_buff_cb *do_skcb;
- unsigned int offset_start;
unsigned long flags;
- offset_start = session->pkt.dpo * 7;
-
spin_lock_irqsave(&session->skb_queue.lock, flags);
skb_queue_walk(&session->skb_queue, do_skb) {
do_skcb = j1939_skb_to_cb(do_skb);
@@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
return skb;
}
+static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+{
+ unsigned int offset_start;
+
+ offset_start = session->pkt.dpo * 7;
+ return j1939_session_skb_find_by_offset(session, offset_start);
+}
+
/* see if we are receiver
* returns 0 for broadcasts, although we will receive them
*/
@@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
return ret;
session->last_txcmd = dat[0];
- if (dat[0] == J1939_TP_CMD_BAM)
+ if (dat[0] == J1939_TP_CMD_BAM) {
j1939_tp_schedule_txtimer(session, 50);
-
- j1939_tp_set_rxtimeout(session, 1250);
+ j1939_tp_set_rxtimeout(session, 250);
+ } else {
+ j1939_tp_set_rxtimeout(session, 1250);
+ }
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
@@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
int ret = 0;
u8 dat[8];
- se_skb = j1939_session_skb_find(session);
+ se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
@@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
if (len > 7)
len = 7;
+ if (offset + len > se_skb->len) {
+ netdev_err_once(priv->ndev,
+ "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+ __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
+ return -EOVERFLOW;
+ }
+
+ if (!len) {
+ ret = -ENOBUFS;
+ break;
+ }
+
memcpy(&dat[1], &tpdat[offset], len);
ret = j1939_tp_tx_dat(session, dat, len + 1);
if (ret < 0) {
@@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
+ session->state = J1939_SESSION_WAITING_ABORT;
/* do not send aborts on incoming broadcasts */
if (!j1939_cb_is_broadcast(&session->skcb)) {
- session->state = J1939_SESSION_WAITING_ABORT;
j1939_xtp_tx_abort(priv, &session->skcb,
!session->transmission,
err, session->skcb.addr.pgn);
@@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
* cleanup including propagation of the error to user space.
*/
break;
+ case -EOVERFLOW:
+ j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
+ break;
case 0:
session->tx_retry = 0;
break;
@@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
return;
}
session = j1939_xtp_rx_rts_session_new(priv, skb);
- if (!session)
+ if (!session) {
+ if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
+ netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
+ __func__);
return;
+ }
} else {
if (j1939_xtp_rx_rts_session_active(session, skb)) {
j1939_session_put(session);
@@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
}
session->last_cmd = cmd;
- j1939_tp_set_rxtimeout(session, 1250);
-
- if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
- j1939_session_txtimer_cancel(session);
- j1939_tp_schedule_txtimer(session, 0);
+ if (cmd == J1939_TP_CMD_BAM) {
+ if (!session->transmission)
+ j1939_tp_set_rxtimeout(session, 750);
+ } else {
+ if (!session->transmission) {
+ j1939_session_txtimer_cancel(session);
+ j1939_tp_schedule_txtimer(session, 0);
+ }
+ j1939_tp_set_rxtimeout(session, 1250);
}
j1939_session_put(session);
@@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
int offset;
int nbytes;
bool final = false;
+ bool remain = false;
bool do_cts_eoma = false;
int packet;
@@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
__func__, session);
goto out_session_cancel;
}
- se_skb = j1939_session_skb_find(session);
+
+ se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
@@ -1777,6 +1811,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
j1939_cb_is_broadcast(&session->skcb)) {
if (session->pkt.rx >= session->pkt.total)
final = true;
+ else
+ remain = true;
} else {
/* never final, an EOMA must follow */
if (session->pkt.rx >= session->pkt.last)
@@ -1784,7 +1820,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
}
if (final) {
+ j1939_session_timers_cancel(session);
j1939_session_completed(session);
+ } else if (remain) {
+ if (!session->transmission)
+ j1939_tp_set_rxtimeout(session, 750);
} else if (do_cts_eoma) {
j1939_tp_set_rxtimeout(session, 1250);
if (!session->transmission)
@@ -1829,6 +1869,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
else
j1939_xtp_rx_dat_one(session, skb);
}
+
+ if (j1939_cb_is_broadcast(skcb)) {
+ session = j1939_session_get_by_addr(priv, &skcb->addr, false,
+ false);
+ if (session)
+ j1939_xtp_rx_dat_one(session, skb);
+ }
}
/* j1939 main intf */
@@ -1920,7 +1967,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_rts(priv, skb, true);
- if (j1939_tp_im_receiver(skcb))
+ if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
j1939_xtp_rx_rts(priv, skb, false);
break;
@@ -1984,7 +2031,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
- if (!j1939_tp_im_involved_anydir(skcb))
+ if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
return 0;
switch (skcb->addr.pgn) {
@@ -2017,6 +2064,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
if (!skb->sk)
return;
+ if (skb->sk->sk_family != AF_CAN ||
+ skb->sk->sk_protocol != CAN_J1939)
+ return;
+
j1939_session_list_lock(priv);
session = j1939_session_get_simple(priv, skb);
j1939_session_list_unlock(priv);
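For context, the timer values used above track the SAE J1939-21 transport-protocol timers; the mapping below is an interpretation of the patch, not text from it:

/* BAM tx:  data frames >= 50 ms apart (txtimer); 250 ms watchdog
 *          on the session while transmitting
 * BAM rx:  750 ms (T1) allowed between received data frames
 * RTS/CTS: 1250 ms (T3) response timeout in either direction
 */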
diff --git a/net/compat.c b/net/compat.c
index 434838bef5f8..7dc670c8eac5 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -309,6 +309,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
break;
}
/* Bump the usage count and install the file. */
+ __receive_sock(fp[i]);
fd_install(new_fd, get_file(fp[i]));
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 82e1b5b06167..a69e79327c29 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8249,15 +8249,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
+ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ fullsock_reg = reg; \
+ jmp += 2; \
+ } \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
- si->dst_reg, si->src_reg, \
+ fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
+ if (si->dst_reg == si->src_reg) \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
@@ -8266,6 +8282,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
OBJ_FIELD), \
si->dst_reg, si->dst_reg, \
offsetof(OBJ, OBJ_FIELD)); \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_JMP_A(1); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } \
+ } while (0)
+
+#define SOCK_OPS_GET_SK() \
+ do { \
+ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ fullsock_reg = reg; \
+ jmp += 2; \
+ } \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ fullsock_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
+ if (si->dst_reg == si->src_reg) \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_JMP_A(1); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } \
} while (0)
#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@@ -8552,17 +8611,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
break;
case offsetof(struct bpf_sock_ops, sk):
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct bpf_sock_ops_kern,
- is_fullsock),
- si->dst_reg, si->src_reg,
- offsetof(struct bpf_sock_ops_kern,
- is_fullsock));
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct bpf_sock_ops_kern, sk),
- si->dst_reg, si->src_reg,
- offsetof(struct bpf_sock_ops_kern, sk));
+ SOCK_OPS_GET_SK();
break;
}
return insn - insn_buf;
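Both macros scavenge a scratch register by starting at BPF_REG_9 and stepping down until it aliases neither dst_reg nor src_reg, which is what makes the previously broken dst_reg == src_reg case safe. A sketch of the sequence SOCK_OPS_GET_SK() emits in that aliased case, assuming R1 is the shared register and R9 is free:

/*
 *   *(u64 *)(R1 + temp) = R9      // spill the scratch register
 *   R9 = *(R1 + is_fullsock)      // test via the scratch copy
 *   if R9 == 0 goto +3            // not a full socket
 *   R9 = *(u64 *)(R1 + temp)      // restore before clobbering R1
 *   R1 = *(u64 *)(R1 + sk)        // the actual load, dst == src
 *   goto +1
 *   R9 = *(u64 *)(R1 + temp)      // restore on the fail path
 */

The spill/restore through bpf_sock_ops_kern.temp is what lets the macro overwrite the register that serves as both source and destination.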
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8afefe6f6b6..7afe52bd038b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5419,8 +5419,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto err_free;
-
- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+ /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
goto err_free;
vhdr = (struct vlan_hdr *)skb->data;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2e5b7870e5d3..78f8736be9c5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2842,6 +2842,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
}
EXPORT_SYMBOL(sock_no_mmap);
+/*
+ * When a file is received (via SCM_RIGHTS, etc), we must bump the
+ * various sock-based usage counts.
+ */
+void __receive_sock(struct file *file)
+{
+ struct socket *sock;
+ int error;
+
+ /*
+ * The resulting value of "error" is ignored here since we only
+ * need to take action when the file is a socket and testing
+ * "sock" for NULL is sufficient.
+ */
+ sock = sock_from_file(file, &error);
+ if (sock) {
+ sock_update_netprioidx(&sock->sk->sk_cgrp_data);
+ sock_update_classid(&sock->sk->sk_cgrp_data);
+ }
+}
+
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
@@ -3443,6 +3464,16 @@ static void sock_inuse_add(struct net *net, int val)
}
#endif
+static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
+{
+ if (!twsk_prot)
+ return;
+ kfree(twsk_prot->twsk_slab_name);
+ twsk_prot->twsk_slab_name = NULL;
+ kmem_cache_destroy(twsk_prot->twsk_slab);
+ twsk_prot->twsk_slab = NULL;
+}
+
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
if (!rsk_prot)
@@ -3513,7 +3544,7 @@ int proto_register(struct proto *prot, int alloc_slab)
prot->slab_flags,
NULL);
if (prot->twsk_prot->twsk_slab == NULL)
- goto out_free_timewait_sock_slab_name;
+ goto out_free_timewait_sock_slab;
}
}
@@ -3521,15 +3552,15 @@ int proto_register(struct proto *prot, int alloc_slab)
ret = assign_proto_idx(prot);
if (ret) {
mutex_unlock(&proto_list_mutex);
- goto out_free_timewait_sock_slab_name;
+ goto out_free_timewait_sock_slab;
}
list_add(&prot->node, &proto_list);
mutex_unlock(&proto_list_mutex);
return ret;
-out_free_timewait_sock_slab_name:
+out_free_timewait_sock_slab:
if (alloc_slab && prot->twsk_prot)
- kfree(prot->twsk_prot->twsk_slab_name);
+ tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
if (alloc_slab) {
req_prot_cleanup(prot->rsk_prot);
@@ -3553,12 +3584,7 @@ void proto_unregister(struct proto *prot)
prot->slab = NULL;
req_prot_cleanup(prot->rsk_prot);
-
- if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
- kmem_cache_destroy(prot->twsk_prot->twsk_slab);
- kfree(prot->twsk_prot->twsk_slab_name);
- prot->twsk_prot->twsk_slab = NULL;
- }
+ tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);
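The new __receive_sock() hook runs when a received file descriptor turns out to be a socket, as in the scm_detach_fds_compat() hunk above. A user-space sketch (illustrative only) of the SCM_RIGHTS send side that triggers this path on receipt:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_fd(int unix_sock, int fd_to_pass)
{
	char dummy = '*';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {	/* correctly aligned cmsg buffer for one int */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* pass a file descriptor */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	/* scm_detach_fds() on the receiving side installs the fd and,
	 * with this patch, calls __receive_sock() on it. */
	return sendmsg(unix_sock, &msg, 0);
}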
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
index 4e632dc987d8..495635f152ba 100644
--- a/net/ethtool/features.c
+++ b/net/ethtool/features.c
@@ -224,7 +224,9 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(old_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(new_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT);
struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1];
@@ -250,6 +252,7 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
ethnl_features_to_bitmap(old_active, dev->features);
+ ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
tb[ETHTOOL_A_FEATURES_WANTED],
netdev_features_strings, info->extack);
@@ -261,17 +264,15 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
}
- /* set req_wanted bits not in req_mask from old_active */
+ /* set req_wanted bits not in req_mask from old_wanted */
bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT);
- bitmap_andnot(new_active, old_active, req_mask, NETDEV_FEATURE_COUNT);
- bitmap_or(req_wanted, new_active, req_wanted, NETDEV_FEATURE_COUNT);
- if (bitmap_equal(req_wanted, old_active, NETDEV_FEATURE_COUNT)) {
- ret = 0;
- goto out_rtnl;
+ bitmap_andnot(new_wanted, old_wanted, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_or(req_wanted, new_wanted, req_wanted, NETDEV_FEATURE_COUNT);
+ if (!bitmap_equal(req_wanted, old_wanted, NETDEV_FEATURE_COUNT)) {
+ dev->wanted_features &= ~dev->hw_features;
+ dev->wanted_features |= ethnl_bitmap_to_features(req_wanted) & dev->hw_features;
+ __netdev_update_features(dev);
}
-
- dev->wanted_features = ethnl_bitmap_to_features(req_wanted);
- __netdev_update_features(dev);
ethnl_features_to_bitmap(new_active, dev->features);
mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index afaf582a5aa9..a1be020bde8e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -296,6 +296,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
ipv6_only_sock(sk), true, false);
}
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk)
+{
+ kuid_t uid = sock_i_uid(sk);
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+
+ if (hlist_empty(&tb->owners)) {
+ tb->fastreuse = reuse;
+ if (sk->sk_reuseport) {
+ tb->fastreuseport = FASTREUSEPORT_ANY;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ } else {
+ tb->fastreuseport = 0;
+ }
+ } else {
+ if (!reuse)
+ tb->fastreuse = 0;
+ if (sk->sk_reuseport) {
+ /* We didn't match or we don't have fastreuseport set on
+ * the tb, but we have sk_reuseport set on this socket
+ * and we know that there are no bind conflicts with
+ * this socket in this tb, so reset our tb's reuseport
+ * settings so that any subsequent sockets that match
+ * our current socket will be put on the fast path.
+ *
+ * If we reset we need to set FASTREUSEPORT_STRICT so we
+ * do extra checking for all subsequent sk_reuseport
+ * socks.
+ */
+ if (!sk_reuseport_match(tb, sk)) {
+ tb->fastreuseport = FASTREUSEPORT_STRICT;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ }
+ } else {
+ tb->fastreuseport = 0;
+ }
+ }
+}
+
/* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port.
* We try to allocate an odd port (and leave even ports for connect())
@@ -308,7 +359,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
struct inet_bind_bucket *tb = NULL;
- kuid_t uid = sock_i_uid(sk);
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
@@ -345,49 +395,8 @@ tb_found:
goto fail_unlock;
}
success:
- if (hlist_empty(&tb->owners)) {
- tb->fastreuse = reuse;
- if (sk->sk_reuseport) {
- tb->fastreuseport = FASTREUSEPORT_ANY;
- tb->fastuid = uid;
- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
- tb->fast_ipv6_only = ipv6_only_sock(sk);
- tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
- } else {
- tb->fastreuseport = 0;
- }
- } else {
- if (!reuse)
- tb->fastreuse = 0;
- if (sk->sk_reuseport) {
- /* We didn't match or we don't have fastreuseport set on
- * the tb, but we have sk_reuseport set on this socket
- * and we know that there are no bind conflicts with
- * this socket in this tb, so reset our tb's reuseport
- * settings so that any subsequent sockets that match
- * our current socket will be put on the fast path.
- *
- * If we reset we need to set FASTREUSEPORT_STRICT so we
- * do extra checking for all subsequent sk_reuseport
- * socks.
- */
- if (!sk_reuseport_match(tb, sk)) {
- tb->fastreuseport = FASTREUSEPORT_STRICT;
- tb->fastuid = uid;
- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
- tb->fast_ipv6_only = ipv6_only_sock(sk);
- tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
- }
- } else {
- tb->fastreuseport = 0;
- }
- }
+ inet_csk_update_fastreuse(tb, sk);
+
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 2bbaaf0c7176..006a34b18537 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
return -ENOMEM;
}
}
+ inet_csk_update_fastreuse(tb, child);
}
inet_bind_hash(child, tb, port);
spin_unlock(&head->lock);
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index cc8049b100b2..134e92382275 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -446,7 +446,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
unsigned int i, j;
u8 nhg_fdb = 0;
- if (len & (sizeof(struct nexthop_grp) - 1)) {
+ if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
NL_SET_ERR_MSG(extack,
"Invalid length for nexthop group attribute");
return -EINVAL;
@@ -1187,6 +1187,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
struct nexthop *nh;
int i;
+ if (WARN_ON(!num_nh))
+ return ERR_PTR(-EINVAL);
+
nh = nexthop_alloc();
if (!nh)
return ERR_PTR(-ENOMEM);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5653e3b011bf..54023a46db04 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
2 * TCP_FASTOPEN_KEY_MAX) +
(TCP_FASTOPEN_KEY_MAX * 5)) };
- struct tcp_fastopen_context *ctx;
- u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
- __le32 key[TCP_FASTOPEN_KEY_MAX * 4];
+ u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
+ __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
char *backup_data;
- int ret, i = 0, off = 0, n_keys = 0;
+ int ret, i = 0, off = 0, n_keys;
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
if (!tbl.data)
return -ENOMEM;
- rcu_read_lock();
- ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
- if (ctx) {
- n_keys = tcp_fastopen_context_len(ctx);
- memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
- }
- rcu_read_unlock();
-
+ n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
if (!n_keys) {
memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
n_keys = 1;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6f0caf9a866d..30c1142584b1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3694,22 +3694,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return 0;
case TCP_FASTOPEN_KEY: {
- __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
- struct tcp_fastopen_context *ctx;
- unsigned int key_len = 0;
+ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
+ unsigned int key_len;
if (get_user(len, optlen))
return -EFAULT;
- rcu_read_lock();
- ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
- if (ctx) {
- key_len = tcp_fastopen_context_len(ctx) *
- TCP_FASTOPEN_KEY_LENGTH;
- memcpy(&key[0], &ctx->key[0], key_len);
- }
- rcu_read_unlock();
-
+ key_len = tcp_fastopen_get_cipher(net, icsk, key) *
+ TCP_FASTOPEN_KEY_LENGTH;
len = min_t(unsigned int, len, key_len);
if (put_user(len, optlen))
return -EFAULT;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 19ad9586c720..1bb85821f1e6 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -108,6 +108,29 @@ out:
return err;
}
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+ u64 *key)
+{
+ struct tcp_fastopen_context *ctx;
+ int n_keys = 0, i;
+
+ rcu_read_lock();
+ if (icsk)
+ ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+ else
+ ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+ if (ctx) {
+ n_keys = tcp_fastopen_context_len(ctx);
+ for (i = 0; i < n_keys; i++) {
+ put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
+ put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
+ }
+ }
+ rcu_read_unlock();
+
+ return n_keys;
+}
+
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
struct sk_buff *syn,
const siphash_key_t *key,
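Both consumers of the new tcp_fastopen_get_cipher() are visible above: the fastopen_key sysctl handler and do_tcp_getsockopt(). For reference, a user-space sketch of the getsockopt() side (assumes the UAPI value of TCP_FASTOPEN_KEY; keys are 16 bytes each, primary plus optional backup):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33	/* from <linux/tcp.h> */
#endif

static void dump_fastopen_keys(int listen_fd)
{
	unsigned char key[2 * 16];	/* primary + optional backup key */
	socklen_t len = sizeof(key);

	if (getsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY,
		       key, &len) == 0)
		printf("got %u bytes of fastopen key material\n",
		       (unsigned int)len);
}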
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a18c378ca5f4..d8f0102cec94 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -913,7 +913,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
struct metadata_dst *tun_dst,
bool log_ecn_err)
{
- return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
+ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+ const struct ipv6hdr *ipv6h,
+ struct sk_buff *skb);
+
+ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
+ if (tpi->proto == htons(ETH_P_IP))
+ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
+
+ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index af4cc5fb678e..05e966f1609e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1050,7 +1050,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
might_sleep();
lockdep_assert_held(&local->sta_mtx);
- while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
WARN_ON_ONCE(ret);
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index aa6a603a2425..517f6a2ac15a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2066,14 +2066,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
- bool uses_ct = false, resched = false;
+ bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
} else {
@@ -2081,15 +2081,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
* that uses conntrack while it is still
* referenced by controlled connection(s).
*/
- resched = !uses_ct;
+ resched = !old_ct;
}
}
if (resched) {
+ if (!old_ct)
+ cp->flags &= ~IP_VS_CONN_F_NFCT;
if (!atomic_read(&cp->n_control))
ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
- if (uses_ct)
+ if (old_ct)
return NF_DROP;
cp = NULL;
}
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 07782836fad6..3c48cdc8935d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
- *dest = (err >= 0);
+ nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
@@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
- *dest = (err >= 0);
+ nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 951b6e87ed5d..7bc6537f3ccb 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -253,7 +253,7 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
return false;
break;
case NFT_META_IIFGROUP:
- if (!nft_meta_store_ifgroup(dest, nft_out(pkt)))
+ if (!nft_meta_store_ifgroup(dest, nft_in(pkt)))
return false;
break;
case NFT_META_OIFGROUP:
diff --git a/net/netlink/policy.c b/net/netlink/policy.c
index f6491853c797..2b3e26f7496f 100644
--- a/net/netlink/policy.c
+++ b/net/netlink/policy.c
@@ -51,6 +51,9 @@ static int add_policy(struct nl_policy_dump **statep,
if (!state)
return -ENOMEM;
+ memset(&state->policies[state->n_alloc], 0,
+ flex_array_size(state, policies, n_alloc - state->n_alloc));
+
state->policies[state->n_alloc].policy = policy;
state->policies[state->n_alloc].maxtype = maxtype;
state->n_alloc = n_alloc;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index ba5ffd3badd3..b5c867fe3232 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -332,10 +332,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
return -ESOCKTNOSUPPORT;
- if (sock->type == SOCK_RAW)
+ if (sock->type == SOCK_RAW) {
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
sock->ops = &rawsock_raw_ops;
- else
+ } else {
sock->ops = &rawsock_ops;
+ }
sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
if (!sk)
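The effect, seen from user space (a sketch; the AF_NFC fallback covers older libc headers):

#include <errno.h>
#include <linux/nfc.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_NFC
#define AF_NFC 39	/* PF_NFC in the kernel UAPI */
#endif

int main(void)
{
	int fd = socket(AF_NFC, SOCK_RAW, NFC_SOCKPROTO_RAW);

	/* rawsock_create() now requires CAP_NET_RAW for SOCK_RAW */
	if (fd < 0 && errno == EPERM)
		puts("raw NFC sockets are gated behind CAP_NET_RAW");
	return 0;
}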
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 29bd405adbbd..301f41d4929b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -942,6 +942,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+ __releases(&pkc->blk_fill_in_prog_lock)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
atomic_dec(&pkc->blk_fill_in_prog);
@@ -989,6 +990,7 @@ static void prb_fill_curr_block(char *curr,
struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd,
unsigned int len)
+ __acquires(&pkc->blk_fill_in_prog_lock)
{
struct tpacket3_hdr *ppd;
@@ -2286,8 +2288,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (do_vnet &&
virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
- vio_le(), true, 0))
+ vio_le(), true, 0)) {
+ if (po->tp_version == TPACKET_V3)
+ prb_clear_blk_fill_status(&po->rx_ring);
goto drop_n_account;
+ }
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
@@ -2393,7 +2398,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
__clear_bit(slot_id, po->rx_ring.rx_owner_map);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
- } else {
+ } else if (po->tp_version == TPACKET_V3) {
prb_clear_blk_fill_status(&po->rx_ring);
}
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 300a104b9a0f..85ab4559f057 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -692,23 +692,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
*/
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
+ u32 min_port;
int rc;
mutex_lock(&qrtr_port_lock);
if (!*port) {
- rc = idr_alloc(&qrtr_ports, ipc,
- QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
- GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
+ min_port = QRTR_MIN_EPH_SOCKET;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
rc = -EACCES;
} else if (*port == QRTR_PORT_CTRL) {
- rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+ min_port = 0;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
} else {
- rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
+ min_port = *port;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
}
mutex_unlock(&qrtr_port_lock);
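The switch to idr_alloc_u32() matters because QRTR port numbers are u32 (QRTR_PORT_CTRL is 0xfffffffe) while idr_alloc() works in int. The two API contracts relied on above, sketched:

/*
 *   id = idr_alloc(idr, ptr, start, end, gfp);
 *       - 'end' is exclusive; the new id is the (int) return value,
 *         so ids above INT_MAX cannot be represented
 *
 *   ret = idr_alloc_u32(idr, ptr, &id, max, gfp);
 *       - 'max' is inclusive; returns 0 on success and stores the
 *         new id through &id, covering the full u32 range
 */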
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6ed1652d1e26..41d8440deaf1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -704,7 +704,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
err = ip_defrag(net, skb, user);
local_bh_enable();
if (err && err != -EINPROGRESS)
- goto out_free;
+ return err;
if (!err)
*defrag = true;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index bda2536dd740..6dc95dcc0ff4 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -88,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
int ret;
if (outcnt <= stream->outcnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->out, outcnt, gfp);
if (ret)
return ret;
+out:
stream->outcnt = outcnt;
return 0;
}
@@ -104,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
int ret;
if (incnt <= stream->incnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->in, incnt, gfp);
if (ret)
return ret;
+out:
stream->incnt = incnt;
return 0;
}
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index e1f64f4ba236..da9ba6d1679b 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_connection *conn = &smc->conn;
- struct smcd_diag_dmbinfo dinfo = {
- .linkid = *((u32 *)conn->lgr->id),
- .peer_gid = conn->lgr->peer_gid,
- .my_gid = conn->lgr->smcd->local_gid,
- .token = conn->rmb_desc->token,
- .peer_token = conn->peer_token
- };
+ struct smcd_diag_dmbinfo dinfo;
+
+ memset(&dinfo, 0, sizeof(dinfo));
+
+ dinfo.linkid = *((u32 *)conn->lgr->id);
+ dinfo.peer_gid = conn->lgr->peer_gid;
+ dinfo.my_gid = conn->lgr->smcd->local_gid;
+ dinfo.token = conn->rmb_desc->token;
+ dinfo.peer_token = conn->peer_token;
if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
goto errout;
diff --git a/net/socket.c b/net/socket.c
index 976426d03f09..481fd5f25669 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -500,7 +500,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
if (f.file) {
sock = sock_from_file(f.file, err);
if (likely(sock)) {
- *fput_needed = f.flags;
+ *fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
fdput(f);
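A sketch of why the mask is needed, based on the fdput flag bits in <linux/file.h>:

/*
 *   #define FDPUT_FPUT       1
 *   #define FDPUT_POS_UNLOCK 2
 *
 * fput_light() treats any non-zero fput_needed as "drop a reference",
 * so a stray FDPUT_POS_UNLOCK bit copied into *fput_needed could
 * trigger a bogus fput(); masking with FDPUT_FPUT avoids that.
 */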
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index cf0fd170ac18..90b8329fef82 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -584,7 +584,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
buf->head[0].iov_len);
memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
- buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
+ buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);
/* Trim off the trailing "extra count" and checksum blob */
xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 46027d0c903f..c28051f7d217 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -958,7 +958,6 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
pad = priv_len - buf->len;
- buf->len -= pad;
/* The upper layers assume the buffer is aligned on 4-byte boundaries.
* In the krb5p case, at least, the data ends up offset, so we need to
* move it around. */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index e426fedb9524..ac16d83f2d26 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -265,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
struct svc_rdma_recv_ctxt *ctxt;
+ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+ return 0;
ctxt = svc_rdma_recv_ctxt_get(rdma);
if (!ctxt)
return -ENOMEM;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 5eb35309ecef..83806fa94def 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -684,7 +684,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
struct svc_rdma_read_info *info,
__be32 *p)
{
- unsigned int i;
int ret;
ret = -EINVAL;
@@ -707,12 +706,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
info->ri_chunklen += rs_length;
}
- /* Pages under I/O have been copied to head->rc_pages.
- * Prevent their premature release by svc_xprt_release() .
- */
- for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
- rqstp->rq_pages[i] = NULL;
-
return ret;
}
@@ -807,6 +800,26 @@ out:
return ret;
}
+/* Pages under I/O have been copied to head->rc_pages. Ensure they
+ * are not released by svc_xprt_release() until the I/O is complete.
+ *
+ * This has to be done after all Read WRs are constructed to properly
+ * handle a page that is part of I/O on behalf of two different RDMA
+ * segments.
+ *
+ * Do this only if I/O has been posted. Otherwise, we do indeed want
+ * svc_xprt_release() to clean things up properly.
+ */
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+ const unsigned int start,
+ const unsigned int num_pages)
+{
+ unsigned int i;
+
+ for (i = start; i < num_pages + start; i++)
+ rqstp->rq_pages[i] = NULL;
+}
+
/**
* svc_rdma_recv_read_chunk - Pull a Read chunk from the client
* @rdma: controlling RDMA transport
@@ -860,6 +873,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
if (ret < 0)
goto out_err;
+ svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
return 0;
out_err:
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index c8c47fc72653..d6426b6cc9c5 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -757,10 +757,12 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
switch (err) {
case 0:
this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ rcu_read_lock();
if (likely(test_bit(0, &b->up)))
b->media->send_msg(net, skb, b, &tx_ctx->dst);
else
kfree_skb(skb);
+ rcu_read_unlock();
break;
case -EINPROGRESS:
return;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 217516357ef2..90e3c70a91ad 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -275,8 +275,9 @@ err_out:
static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg)
{
- int err;
+ struct nlmsghdr *nlh;
struct sk_buff *arg;
+ int err;
if (msg->req_type && (!msg->req_size ||
!TLV_CHECK_TYPE(msg->req, msg->req_type)))
@@ -305,6 +306,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
}
+ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+ if (!nlh) {
+ kfree_skb(arg);
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -EMSGSIZE;
+ }
+ nlmsg_end(arg, nlh);
+
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
if (err) {
kfree_skb(msg->rep);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 0e55f8365ce2..0cbad566f281 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct iov_iter msg_iter;
- char *kaddr = kmap(page);
+ char *kaddr;
struct kvec iov;
int rc;
@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
goto out;
}
+ kaddr = kmap(page);
iov.iov_base = kaddr + offset;
iov.iov_len = size;
iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 626bf9044418..6cd0df1c5caf 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1032,7 +1032,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
}
/* Connected sockets that can produce data can be written. */
- if (sk->sk_state == TCP_ESTABLISHED) {
+ if (transport && sk->sk_state == TCP_ESTABLISHED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
bool space_avail_now = false;
int ret = transport->notify_poll_out(