author     Linus Torvalds <torvalds@linux-foundation.org>  2023-07-06 01:44:45 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-07-06 01:44:45 +0300
commit     6843306689aff3aea608e4d2630b2a5a0137f827 (patch)
tree       fe09492b0dbe15c73ca837317ef687922a999312 /net
parent     73a3fcdaa73200e38e38f7e8a32c9b901c5b95b5 (diff)
parent     cc7eab25b1cf3f9594fe61142d3523ce4d14a788 (diff)
download   linux-6843306689aff3aea608e4d2630b2a5a0137f827.tar.xz
Merge tag 'net-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from bluetooth, bpf and wireguard.
Current release - regressions:
- nvme-tcp: fix comma-related oops after sendpage changes
Current release - new code bugs:
- ptp: make max_phase_adjustment sysfs device attribute invisible
when not supported
Previous releases - regressions:
- sctp: fix potential deadlock on &net->sctp.addr_wq_lock
- mptcp:
- ensure subflow is unhashed before cleaning the backlog
- do not rely on implicit state check in mptcp_listen()
Previous releases - always broken:
- net: fix net_dev_start_xmit trace event vs skb_transport_offset()
- Bluetooth:
- fix use-bdaddr-property quirk
- L2CAP: fix multiple UaFs
- ISO: use hci_sync for setting CIG parameters
- hci_event: fix Set CIG Parameters error status handling
- hci_event: fix parsing of CIS Established Event
- MGMT: fix marking SCAN_RSP as not connectable
- wireguard: queuing: use saner cpu selection wrapping
- sched: act_ipt: various bug fixes for iptables <> TC interactions
- sched: act_pedit: add size check for TCA_PEDIT_PARMS_EX
- dsa: fixes for receiving PTP packets with 8021q and sja1105 tagging
- eth: sfc: fix null-deref in devlink port without MAE access
- eth: ibmvnic: do not reset dql stats on NON_FATAL err
Misc:
- xsk: honor SO_BINDTODEVICE on bind"
* tag 'net-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (70 commits)
nfp: clean mc addresses in application firmware when closing port
selftests: mptcp: pm_nl_ctl: fix 32-bit support
selftests: mptcp: depend on SYN_COOKIES
selftests: mptcp: userspace_pm: report errors with 'remove' tests
selftests: mptcp: userspace_pm: use correct server port
selftests: mptcp: sockopt: return error if wrong mark
selftests: mptcp: sockopt: use 'iptables-legacy' if available
selftests: mptcp: connect: fail if nft supposed to work
mptcp: do not rely on implicit state check in mptcp_listen()
mptcp: ensure subflow is unhashed before cleaning the backlog
s390/qeth: Fix vipa deletion
octeontx-af: fix hardware timestamp configuration
net: dsa: sja1105: always enable the send_meta options
net: dsa: tag_sja1105: fix MAC DA patching from meta frames
net: Replace strlcpy with strscpy
pptp: Fix fib lookup calls.
mlxsw: spectrum_router: Fix an IS_ERR() vs NULL check
net/sched: act_pedit: Add size check for TCA_PEDIT_PARMS_EX
xsk: Honor SO_BINDTODEVICE on bind
ptp: Make max_phase_adjustment sysfs device attribute invisible when not supported
...
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_conn.c   |  49
-rw-r--r--  net/bluetooth/hci_event.c  |  67
-rw-r--r--  net/bluetooth/hci_sync.c   |  30
-rw-r--r--  net/bluetooth/hci_sysfs.c  |  14
-rw-r--r--  net/bluetooth/iso.c        |   4
-rw-r--r--  net/bluetooth/l2cap_core.c |   5
-rw-r--r--  net/bluetooth/l2cap_sock.c |   2
-rw-r--r--  net/bridge/br_if.c         |   5
-rw-r--r--  net/dsa/tag_sja1105.c      |  90
-rw-r--r--  net/ipv4/tcp_input.c       |  12
-rw-r--r--  net/mptcp/protocol.c       |   7
-rw-r--r--  net/sched/act_ipt.c        |  70
-rw-r--r--  net/sched/act_pedit.c      |   1
-rw-r--r--  net/sctp/socket.c          |   4
-rw-r--r--  net/xdp/xsk.c              |   5
15 files changed, 230 insertions(+), 135 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 1ef952bda97d..056f9516e46d 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -775,6 +775,11 @@ static void le_conn_timeout(struct work_struct *work) hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); } +struct iso_cig_params { + struct hci_cp_le_set_cig_params cp; + struct hci_cis_params cis[0x1f]; +}; + struct iso_list_data { union { u8 cig; @@ -786,10 +791,7 @@ struct iso_list_data { u16 sync_handle; }; int count; - struct { - struct hci_cp_le_set_cig_params cp; - struct hci_cis_params cis[0x11]; - } pdu; + struct iso_cig_params pdu; }; static void bis_list(struct hci_conn *conn, void *data) @@ -1764,10 +1766,33 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); } +static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err) +{ + struct iso_cig_params *pdu = data; + + bt_dev_dbg(hdev, ""); + + if (err) + bt_dev_err(hdev, "Unable to set CIG parameters: %d", err); + + kfree(pdu); +} + +static int set_cig_params_sync(struct hci_dev *hdev, void *data) +{ + struct iso_cig_params *pdu = data; + u32 plen; + + plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]); + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu, + HCI_CMD_TIMEOUT); +} + static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) { struct hci_dev *hdev = conn->hdev; struct iso_list_data data; + struct iso_cig_params *pdu; memset(&data, 0, sizeof(data)); @@ -1837,11 +1862,15 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) return false; - if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS, - sizeof(data.pdu.cp) + - (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)), - &data.pdu) < 0) + pdu = kmemdup(&data.pdu, sizeof(*pdu), GFP_KERNEL); + if (!pdu) + return false; + + if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu, + set_cig_params_complete) < 0) { + kfree(pdu); return false; + } return true; } @@ -2044,10 +2073,10 @@ static int create_big_sync(struct hci_dev *hdev, void *data) flags |= MGMT_ADV_FLAG_SEC_2M; /* Align intervals */ - interval = qos->bcast.out.interval / 1250; + interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor; if (qos->bcast.bis) - sync_interval = qos->bcast.sync_interval * 1600; + sync_interval = interval * 4; err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, conn->le_per_adv_data, flags, interval, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 09ba6d8987ee..95816a938cea 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3812,7 +3812,8 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); - if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) { + if (!rp->status && (!cp || rp->num_handles != cp->num_cis || + rp->cig_id != cp->cig_id)) { bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); status = HCI_ERROR_UNSPECIFIED; } @@ -6316,23 +6317,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } - /* When receiving non-connectable or scannable undirected - * advertising reports, this means that the remote device is - * not connectable and then clearly indicate this in the - * 
device found event. - * - * When receiving a scan response, then there is no way to + /* When receiving a scan response, then there is no way to * know if the remote device is connectable or not. However * since scan responses are merged with a previously seen * advertising report, the flags field from that report * will be used. * - * In the really unlikely case that a controller get confused - * and just sends a scan response event, then it is marked as - * not connectable as well. + * In the unlikely case that a controller just sends a scan + * response event that doesn't match the pending report, then + * it is marked as a standalone SCAN_RSP. */ if (type == LE_ADV_SCAN_RSP) - flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; + flags = MGMT_DEV_FOUND_SCAN_RSP; /* If there's nothing pending either store the data from this * event or send an immediate device found event if the data @@ -6790,6 +6786,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, { struct hci_evt_le_cis_established *ev = data; struct hci_conn *conn; + struct bt_iso_qos *qos; u16 handle = __le16_to_cpu(ev->handle); bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); @@ -6811,21 +6808,39 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, goto unlock; } - if (conn->role == HCI_ROLE_SLAVE) { - __le32 interval; - - memset(&interval, 0, sizeof(interval)); - - memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); - conn->iso_qos.ucast.in.interval = le32_to_cpu(interval); - memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); - conn->iso_qos.ucast.out.interval = le32_to_cpu(interval); - conn->iso_qos.ucast.in.latency = le16_to_cpu(ev->interval); - conn->iso_qos.ucast.out.latency = le16_to_cpu(ev->interval); - conn->iso_qos.ucast.in.sdu = le16_to_cpu(ev->c_mtu); - conn->iso_qos.ucast.out.sdu = le16_to_cpu(ev->p_mtu); - conn->iso_qos.ucast.in.phy = ev->c_phy; - conn->iso_qos.ucast.out.phy = ev->p_phy; + qos = &conn->iso_qos; + + /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */ + qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250; + qos->ucast.out.interval = qos->ucast.in.interval; + + switch (conn->role) { + case HCI_ROLE_SLAVE: + /* Convert Transport Latency (us) to Latency (msec) */ + qos->ucast.in.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), + 1000); + qos->ucast.out.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); + qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); + qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); + qos->ucast.in.phy = ev->c_phy; + qos->ucast.out.phy = ev->p_phy; + break; + case HCI_ROLE_MASTER: + /* Convert Transport Latency (us) to Latency (msec) */ + qos->ucast.out.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), + 1000); + qos->ucast.in.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); + qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); + qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); + qos->ucast.out.phy = ev->c_phy; + qos->ucast.in.phy = ev->p_phy; + break; } if (!ev->status) { diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 804cde43b4e0..8561616abbe5 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -4623,26 +4623,18 @@ static int hci_dev_setup_sync(struct hci_dev *hdev) * BD_ADDR invalid before creating the HCI device or in * its setup callback. 
*/ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); - + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || + test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); if (!ret) { - if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { - if (!bacmp(&hdev->public_addr, BDADDR_ANY)) - hci_dev_get_bd_addr_from_property(hdev); - - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) { - ret = hdev->set_bdaddr(hdev, - &hdev->public_addr); - - /* If setting of the BD_ADDR from the device - * property succeeds, then treat the address - * as valid even if the invalid BD_ADDR - * quirk indicates otherwise. - */ - if (!ret) - invalid_bdaddr = false; - } + if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); + + if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + if (!ret) + invalid_bdaddr = false; } } diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 2934d7f4d564..15b33579007c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -6,7 +6,9 @@ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> -static struct class *bt_class; +static const struct class bt_class = { + .name = "bluetooth", +}; static void bt_link_release(struct device *dev) { @@ -36,7 +38,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn) BT_DBG("conn %p", conn); conn->dev.type = &bt_link; - conn->dev.class = bt_class; + conn->dev.class = &bt_class; conn->dev.parent = &hdev->dev; device_initialize(&conn->dev); @@ -104,7 +106,7 @@ void hci_init_sysfs(struct hci_dev *hdev) struct device *dev = &hdev->dev; dev->type = &bt_host; - dev->class = bt_class; + dev->class = &bt_class; __module_get(THIS_MODULE); device_initialize(dev); @@ -112,12 +114,10 @@ void hci_init_sysfs(struct hci_dev *hdev) int __init bt_sysfs_init(void) { - bt_class = class_create("bluetooth"); - - return PTR_ERR_OR_ZERO(bt_class); + return class_register(&bt_class); } void bt_sysfs_cleanup(void) { - class_destroy(bt_class); + class_unregister(&bt_class); } diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 34d55a85d8f6..0e6cc57b3911 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -704,7 +704,7 @@ static struct bt_iso_qos default_qos = { .bcast = { .big = BT_ISO_QOS_BIG_UNSET, .bis = BT_ISO_QOS_BIS_UNSET, - .sync_interval = 0x00, + .sync_factor = 0x01, .packing = 0x00, .framing = 0x00, .in = DEFAULT_IO_QOS, @@ -1213,7 +1213,7 @@ static bool check_ucast_qos(struct bt_iso_qos *qos) static bool check_bcast_qos(struct bt_iso_qos *qos) { - if (qos->bcast.sync_interval > 0x07) + if (qos->bcast.sync_factor == 0x00) return false; if (qos->bcast.packing > 0x01) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c5e8798e297c..17ca13e8c044 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6374,9 +6374,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (!chan) goto done; + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index eebe256104bc..947ca580bb9a 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -46,6 +46,7 @@ static const 
struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -1415,6 +1416,7 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; + l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 3f04b40f6056..2450690f98cf 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -166,8 +166,9 @@ void br_manage_promisc(struct net_bridge *br) * This lets us disable promiscuous mode and write * this config to hw. */ - if (br->auto_cnt == 0 || - (br->auto_cnt == 1 && br_auto_port(p))) + if ((p->dev->priv_flags & IFF_UNICAST_FLT) && + (br->auto_cnt == 0 || + (br->auto_cnt == 1 && br_auto_port(p)))) br_port_clear_promisc(p); else br_port_set_promisc(p); diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index a5f3b73da417..ade3eeb2f3e6 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -58,11 +58,8 @@ #define SJA1110_TX_TRAILER_LEN 4 #define SJA1110_MAX_PADDING_LEN 15 -#define SJA1105_HWTS_RX_EN 0 - struct sja1105_tagger_private { struct sja1105_tagger_data data; /* Must be first */ - unsigned long state; /* Protects concurrent access to the meta state machine * from taggers running on multiple ports on SMP systems */ @@ -118,8 +115,8 @@ static void sja1105_meta_unpack(const struct sk_buff *skb, * a unified unpacking command for both device series. */ packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0); - packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0); - packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0); + packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0); + packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0); packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0); packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0); } @@ -392,10 +389,6 @@ static struct sk_buff priv = sja1105_tagger_private(ds); - if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) - /* Do normal processing. */ - return skb; - spin_lock(&priv->meta_lock); /* Was this a link-local frame instead of the meta * that we were expecting? @@ -431,12 +424,6 @@ static struct sk_buff priv = sja1105_tagger_private(ds); - /* Drop the meta frame if we're not in the right state - * to process it. 
- */ - if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) - return NULL; - spin_lock(&priv->meta_lock); stampable_skb = priv->stampable_skb; @@ -472,30 +459,6 @@ static struct sk_buff return skb; } -static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds) -{ - struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); - - return test_bit(SJA1105_HWTS_RX_EN, &priv->state); -} - -static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on) -{ - struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); - - if (on) - set_bit(SJA1105_HWTS_RX_EN, &priv->state); - else - clear_bit(SJA1105_HWTS_RX_EN, &priv->state); - - /* Initialize the meta state machine to a known state */ - if (!priv->stampable_skb) - return; - - kfree_skb(priv->stampable_skb); - priv->stampable_skb = NULL; -} - static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb) { u16 tpid = ntohs(eth_hdr(skb)->h_proto); @@ -545,33 +508,53 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, is_link_local = sja1105_is_link_local(skb); is_meta = sja1105_is_meta_frame(skb); - if (sja1105_skb_has_tag_8021q(skb)) { - /* Normal traffic path. */ - sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid); - } else if (is_link_local) { + if (is_link_local) { /* Management traffic path. Switch embeds the switch ID and * port ID into bytes of the destination MAC, courtesy of * the incl_srcpt options. */ source_port = hdr->h_dest[3]; switch_id = hdr->h_dest[4]; - /* Clear the DMAC bytes that were mangled by the switch */ - hdr->h_dest[3] = 0; - hdr->h_dest[4] = 0; } else if (is_meta) { sja1105_meta_unpack(skb, &meta); source_port = meta.source_port; switch_id = meta.switch_id; - } else { + } + + /* Normal data plane traffic and link-local frames are tagged with + * a tag_8021q VLAN which we have to strip + */ + if (sja1105_skb_has_tag_8021q(skb)) { + int tmp_source_port = -1, tmp_switch_id = -1; + + sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid, + &vid); + /* Preserve the source information from the INCL_SRCPT option, + * if available. This allows us to not overwrite a valid source + * port and switch ID with zeroes when receiving link-local + * frames from a VLAN-unaware bridged port (non-zero vbid) or a + * VLAN-aware bridged port (non-zero vid). Furthermore, the + * tag_8021q source port information is only of trust when the + * vbid is 0 (precise port). Otherwise, tmp_source_port and + * tmp_switch_id will be zeroes. + */ + if (vbid == 0 && source_port == -1) + source_port = tmp_source_port; + if (vbid == 0 && switch_id == -1) + switch_id = tmp_switch_id; + } else if (source_port == -1 && switch_id == -1) { + /* Packets with no source information have no chance of + * getting accepted, drop them straight away. 
+ */ return NULL; } - if (vbid >= 1) + if (source_port != -1 && switch_id != -1) + skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); + else if (vbid >= 1) skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); - else if (source_port == -1 || switch_id == -1) - skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); else - skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); + skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); if (!skb->dev) { netdev_warn(netdev, "Couldn't decode source port\n"); return NULL; @@ -762,7 +745,6 @@ static void sja1105_disconnect(struct dsa_switch *ds) static int sja1105_connect(struct dsa_switch *ds) { - struct sja1105_tagger_data *tagger_data; struct sja1105_tagger_private *priv; struct kthread_worker *xmit_worker; int err; @@ -782,10 +764,6 @@ static int sja1105_connect(struct dsa_switch *ds) } priv->xmit_worker = xmit_worker; - /* Export functions for switch driver use */ - tagger_data = &priv->data; - tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state; - tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state; ds->tagger_data = priv; return 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6f072095211e..57c8af1859c1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3590,8 +3590,11 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, u32 *last_oow_ack_time) { - if (*last_oow_ack_time) { - s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time); + /* Paired with the WRITE_ONCE() in this function. */ + u32 val = READ_ONCE(*last_oow_ack_time); + + if (val) { + s32 elapsed = (s32)(tcp_jiffies32 - val); if (0 <= elapsed && elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) { @@ -3600,7 +3603,10 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, } } - *last_oow_ack_time = tcp_jiffies32; + /* Paired with the prior READ_ONCE() and with itself, + * as we might be lockless. + */ + WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32); return false; /* not rate-limited: go ahead, send dupack now! 
*/ } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index e892673deb73..3613489eb6e3 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2909,10 +2909,10 @@ static void mptcp_check_listen_stop(struct sock *sk) return; lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); + tcp_set_state(ssk, TCP_CLOSE); mptcp_subflow_queue_clean(sk, ssk); inet_csk_listen_stop(ssk); mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); - tcp_set_state(ssk, TCP_CLOSE); release_sock(ssk); } @@ -3703,6 +3703,11 @@ static int mptcp_listen(struct socket *sock, int backlog) pr_debug("msk=%p", msk); lock_sock(sk); + + err = -EINVAL; + if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) + goto unlock; + ssock = __mptcp_nmpc_socket(msk); if (IS_ERR(ssock)) { err = PTR_ERR(ssock); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 5d96ffebd40f..598d6e299152 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -21,6 +21,7 @@ #include <linux/tc_act/tc_ipt.h> #include <net/tc_act/tc_ipt.h> #include <net/tc_wrapper.h> +#include <net/ip.h> #include <linux/netfilter_ipv4/ip_tables.h> @@ -48,7 +49,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t, par.entryinfo = &e; par.target = target; par.targinfo = t->data; - par.hook_mask = hook; + par.hook_mask = 1 << hook; par.family = NFPROTO_IPV4; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); @@ -85,7 +86,8 @@ static void tcf_ipt_release(struct tc_action *a) static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { [TCA_IPT_TABLE] = { .type = NLA_STRING, .len = IFNAMSIZ }, - [TCA_IPT_HOOK] = { .type = NLA_U32 }, + [TCA_IPT_HOOK] = NLA_POLICY_RANGE(NLA_U32, NF_INET_PRE_ROUTING, + NF_INET_NUMHOOKS), [TCA_IPT_INDEX] = { .type = NLA_U32 }, [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, }; @@ -158,15 +160,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, return -EEXIST; } } + + err = -EINVAL; hook = nla_get_u32(tb[TCA_IPT_HOOK]); + switch (hook) { + case NF_INET_PRE_ROUTING: + break; + case NF_INET_POST_ROUTING: + break; + default: + goto err1; + } + + if (tb[TCA_IPT_TABLE]) { + /* mangle only for now */ + if (nla_strcmp(tb[TCA_IPT_TABLE], "mangle")) + goto err1; + } - err = -ENOMEM; - tname = kmalloc(IFNAMSIZ, GFP_KERNEL); + tname = kstrdup("mangle", GFP_KERNEL); if (unlikely(!tname)) goto err1; - if (tb[TCA_IPT_TABLE] == NULL || - nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ) - strcpy(tname, "mangle"); t = kmemdup(td, td->u.target_size, GFP_KERNEL); if (unlikely(!t)) @@ -217,10 +231,31 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla, a, &act_xt_ops, tp, flags); } +static bool tcf_ipt_act_check(struct sk_buff *skb) +{ + const struct iphdr *iph; + unsigned int nhoff, len; + + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + return false; + + nhoff = skb_network_offset(skb); + iph = ip_hdr(skb); + if (iph->ihl < 5 || iph->version != 4) + return false; + + len = skb_ip_totlen(skb); + if (skb->len < nhoff + len || len < (iph->ihl * 4u)) + return false; + + return pskb_may_pull(skb, iph->ihl * 4u); +} + TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { + char saved_cb[sizeof_field(struct sk_buff, cb)]; int ret = 0, result = 0; struct tcf_ipt *ipt = to_ipt(a); struct xt_action_param par; @@ -231,9 +266,24 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb, .pf = NFPROTO_IPV4, }; + if (skb_protocol(skb, false) != htons(ETH_P_IP)) + return 
TC_ACT_UNSPEC; + if (skb_unclone(skb, GFP_ATOMIC)) return TC_ACT_UNSPEC; + if (!tcf_ipt_act_check(skb)) + return TC_ACT_UNSPEC; + + if (state.hook == NF_INET_POST_ROUTING) { + if (!skb_dst(skb)) + return TC_ACT_UNSPEC; + + state.out = skb->dev; + } + + memcpy(saved_cb, skb->cb, sizeof(saved_cb)); + spin_lock(&ipt->tcf_lock); tcf_lastuse_update(&ipt->tcf_tm); @@ -246,6 +296,9 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb, par.state = &state; par.target = ipt->tcfi_t->u.kernel.target; par.targinfo = ipt->tcfi_t->data; + + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + ret = par.target->target(skb, &par); switch (ret) { @@ -266,6 +319,9 @@ TC_INDIRECT_SCOPE int tcf_ipt_act(struct sk_buff *skb, break; } spin_unlock(&ipt->tcf_lock); + + memcpy(skb->cb, saved_cb, sizeof(skb->cb)); + return result; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b562fc2bb5b1..1ef8fcfa9997 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -29,6 +29,7 @@ static struct tc_action_ops act_pedit_ops; static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) }, + [TCA_PEDIT_PARMS_EX] = { .len = sizeof(struct tc_pedit) }, [TCA_PEDIT_KEYS_EX] = { .type = NLA_NESTED }, }; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6554a357fe33..9388d98aebc0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -364,9 +364,9 @@ static void sctp_auto_asconf_init(struct sctp_sock *sp) struct net *net = sock_net(&sp->inet.sk); if (net->sctp.default_auto_asconf) { - spin_lock(&net->sctp.addr_wq_lock); + spin_lock_bh(&net->sctp.addr_wq_lock); list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); - spin_unlock(&net->sctp.addr_wq_lock); + spin_unlock_bh(&net->sctp.addr_wq_lock); sp->do_auto_asconf = 1; } } diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 5a8c0dd250af..31dca4ecb2c5 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -886,6 +886,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); struct net_device *dev; + int bound_dev_if; u32 flags, qid; int err = 0; @@ -899,6 +900,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) XDP_USE_NEED_WAKEUP)) return -EINVAL; + bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); + if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex) + return -EINVAL; + rtnl_lock(); mutex_lock(&xs->mutex); if (xs->state != XSK_READY) { |