Diffstat (limited to 'net')
 net/bluetooth/hci_conn.c            | 17
 net/bluetooth/hci_event.c           | 15
 net/bluetooth/hci_sync.c            | 25
 net/bluetooth/iso.c                 | 16
 net/bluetooth/mgmt.c                | 12
 net/bridge/br_multicast.c           | 16
 net/bridge/br_private.h             |  2
 net/core/dev.c                      | 12
 net/hsr/hsr_slave.c                 |  8
 net/ipv4/netfilter/nf_reject_ipv4.c |  6
 net/ipv6/netfilter/nf_reject_ipv6.c |  5
 net/ipv6/seg6_hmac.c                |  6
 net/mptcp/options.c                 |  6
 net/mptcp/pm.c                      | 18
 net/mptcp/pm_kernel.c               |  1
 net/sched/sch_cake.c                | 14
 net/sched/sch_codel.c               | 12
 net/sched/sch_dualpi2.c             |  5
 net/sched/sch_fq.c                  | 12
 net/sched/sch_fq_codel.c            | 12
 net/sched/sch_fq_pie.c              | 12
 net/sched/sch_hhf.c                 | 12
 net/sched/sch_htb.c                 |  2
 net/sched/sch_pie.c                 | 12
 net/smc/af_smc.c                    |  3
 net/tls/tls_sw.c                    |  7
 26 files changed, 182 insertions(+), 86 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7d1e79f69cd1..7a879290dd28 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -339,7 +339,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
case BT_CODEC_TRANSPARENT:
if (!find_next_esco_param(conn, esco_param_msbc,
ARRAY_SIZE(esco_param_msbc)))
- return false;
+ return -EINVAL;
+
param = &esco_param_msbc[conn->attempt - 1];
cp.tx_coding_format.id = 0x03;
cp.rx_coding_format.id = 0x03;
@@ -830,7 +831,17 @@ static void bis_cleanup(struct hci_conn *conn)
/* Check if ISO connection is a BIS and terminate advertising
* set and BIG if there are no other connections using it.
*/
- bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
+ bis = hci_conn_hash_lookup_big_state(hdev,
+ conn->iso_qos.bcast.big,
+ BT_CONNECTED,
+ HCI_ROLE_MASTER);
+ if (bis)
+ return;
+
+ bis = hci_conn_hash_lookup_big_state(hdev,
+ conn->iso_qos.bcast.big,
+ BT_CONNECT,
+ HCI_ROLE_MASTER);
if (bis)
return;
@@ -2249,7 +2260,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
* the start periodic advertising and create BIG commands have
* been queued
*/
- hci_conn_hash_list_state(hdev, bis_mark_per_adv, PA_LINK,
+ hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
BT_BOUND, &data);
/* Queue start periodic advertising and create BIG */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8aa5039b975a..fe7cdd67ad2a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -6745,8 +6745,8 @@ static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
+ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.in.phy = ev->c_phy;
qos->ucast.out.phy = ev->p_phy;
break;
@@ -6760,8 +6760,8 @@ static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
+ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.out.phy = ev->c_phy;
qos->ucast.in.phy = ev->p_phy;
break;
@@ -6957,9 +6957,14 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
continue;
}
- if (ev->status != 0x42)
+ if (ev->status != 0x42) {
/* Mark PA sync as established */
set_bit(HCI_CONN_PA_SYNC, &bis->flags);
+ /* Reset cleanup callback of PA Sync so it doesn't
+ * terminate the sync when deleting the connection.
+ */
+ conn->cleanup = NULL;
+ }
bis->sync_handle = conn->sync_handle;
bis->iso_qos.bcast.big = ev->handle;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 2b4f21fbf9c1..31d72b9683ef 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -3344,7 +3344,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
list_empty(&hdev->adv_instances)) {
if (ext_adv_capable(hdev)) {
err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
@@ -4531,14 +4531,14 @@ static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
struct hci_cp_le_set_host_feature cp;
- if (!cis_capable(hdev))
+ if (!iso_capable(hdev))
return 0;
memset(&cp, 0, sizeof(cp));
/* Connected Isochronous Channels (Host Support) */
cp.bit_number = 32;
- cp.bit_value = 1;
+ cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -6985,8 +6985,6 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
hci_dev_lock(hdev);
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
-
if (!hci_conn_valid(hdev, conn))
clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
@@ -7047,10 +7045,13 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
/* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update
* it.
*/
- if (conn->sid == HCI_SID_INVALID)
- __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
- HCI_EV_LE_EXT_ADV_REPORT,
- conn->conn_timeout, NULL);
+ if (conn->sid == HCI_SID_INVALID) {
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+ HCI_EV_LE_EXT_ADV_REPORT,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ goto done;
+ }
memset(&cp, 0, sizeof(cp));
cp.options = qos->bcast.options;
@@ -7080,6 +7081,12 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
0, NULL, HCI_CMD_TIMEOUT);
+done:
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
+ hci_update_passive_scan_sync(hdev);
+
return err;
}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 7bd3aa0a6db9..5ce823ca3aaf 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1347,7 +1347,7 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr,
bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst);
sa->iso_bdaddr_type = iso_pi(sk)->dst_type;
- if (hcon && hcon->type == BIS_LINK) {
+ if (hcon && (hcon->type == BIS_LINK || hcon->type == PA_LINK)) {
sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid;
sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis;
memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis,
@@ -2483,11 +2483,11 @@ static const struct net_proto_family iso_sock_family_ops = {
.create = iso_sock_create,
};
-static bool iso_inited;
+static bool inited;
-bool iso_enabled(void)
+bool iso_inited(void)
{
- return iso_inited;
+ return inited;
}
int iso_init(void)
@@ -2496,7 +2496,7 @@ int iso_init(void)
BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr));
- if (iso_inited)
+ if (inited)
return -EALREADY;
err = proto_register(&iso_proto, 0);
@@ -2524,7 +2524,7 @@ int iso_init(void)
iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
NULL, &iso_debugfs_fops);
- iso_inited = true;
+ inited = true;
return 0;
@@ -2535,7 +2535,7 @@ error:
int iso_exit(void)
{
- if (!iso_inited)
+ if (!inited)
return -EALREADY;
bt_procfs_cleanup(&init_net, "iso");
@@ -2549,7 +2549,7 @@ int iso_exit(void)
proto_unregister(&iso_proto);
- iso_inited = false;
+ inited = false;
return 0;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 1ce682038b51..3166f5fb876b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -922,19 +922,19 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
settings |= MGMT_SETTING_WIDEBAND_SPEECH;
- if (cis_central_capable(hdev))
+ if (cis_central_enabled(hdev))
settings |= MGMT_SETTING_CIS_CENTRAL;
- if (cis_peripheral_capable(hdev))
+ if (cis_peripheral_enabled(hdev))
settings |= MGMT_SETTING_CIS_PERIPHERAL;
- if (bis_capable(hdev))
+ if (bis_enabled(hdev))
settings |= MGMT_SETTING_ISO_BROADCASTER;
- if (sync_recv_capable(hdev))
+ if (sync_recv_enabled(hdev))
settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
- if (ll_privacy_capable(hdev))
+ if (ll_privacy_enabled(hdev))
settings |= MGMT_SETTING_LL_PRIVACY;
return settings;
@@ -4513,7 +4513,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
}
if (IS_ENABLED(CONFIG_BT_LE)) {
- flags = iso_enabled() ? BIT(0) : 0;
+ flags = iso_inited() ? BIT(0) : 0;
memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags);
idx++;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1377f31b719c..8ce145938b02 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -4818,6 +4818,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
}
+ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
+ br_info(brmctx->br,
+ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
+ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
+ }
+
brmctx->multicast_query_interval = intvl_jiffies;
}
@@ -4834,6 +4842,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
}
+ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
+ br_info(brmctx->br,
+ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
+ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
+ }
+
brmctx->multicast_startup_query_interval = intvl_jiffies;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b159aae594c0..8de0904b9627 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -31,6 +31,8 @@
#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
+#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
#define BR_HWDOM_MAX BITS_PER_LONG
diff --git a/net/core/dev.c b/net/core/dev.c
index 5a3c0f40a93f..93a25d87b86b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3779,6 +3779,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
features &= ~NETIF_F_TSO_MANGLEID;
}
+ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
+ * so neither does TSO that depends on it.
+ */
+ if (features & NETIF_F_IPV6_CSUM &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+ !ipv6_has_hopopt_jumbo(skb))
+ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
+
return features;
}
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index b87b6a6fe070..102eccf5ead7 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -63,8 +63,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
- protocol == htons(ETH_P_HSR))
+ protocol == htons(ETH_P_HSR)) {
+ if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {
+ kfree_skb(skb);
+ goto finish_consume;
+ }
+
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+ }
skb_reset_mac_len(skb);
/* Only the frames received over the interlink port will assign a
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 87fd945a0d27..0d3cb2ba6fc8 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
if (!oth)
return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
- nf_reject_fill_skb_dst(oldskb) < 0)
+ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
if (iph->frag_off & htons(IP_OFFSET))
return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
- nf_reject_fill_skb_dst(skb_in) < 0)
+ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) ||
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 838295fa32e3..cb2d38e80de9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
+ if (!skb_dst(oldskb)) {
nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (!dst)
return;
@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
- nf_reject6_fill_skb_dst(skb_in) < 0)
+ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index f78ecb6ad838..fd58426f222b 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -35,6 +35,7 @@
#include <net/xfrm.h>
#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <net/seg6.h>
#include <net/genetlink.h>
#include <net/seg6_hmac.h>
@@ -280,7 +281,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))
return false;
- if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0)
+ if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN))
return false;
return true;
@@ -304,6 +305,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
struct seg6_pernet_data *sdata = seg6_pernet(net);
int err;
+ if (!__hmac_get_algo(hinfo->alg_id))
+ return -EINVAL;
+
err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
rht_params);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 70c0ab0ecf90..2a8ea28442b2 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1118,7 +1118,9 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-/* Return false if a subflow has been reset, else return true */
+/* Return false in case of error (or subflow has been reset),
+ * else return true.
+ */
bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -1222,7 +1224,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
- return true;
+ return false;
memset(mpext, 0, sizeof(*mpext));
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 420d416e2603..136a380602ca 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -274,6 +274,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
add_timer);
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
+ unsigned int timeout;
pr_debug("msk=%p\n", msk);
@@ -291,6 +292,10 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
goto out;
}
+ timeout = mptcp_get_add_addr_timeout(sock_net(sk));
+ if (!timeout)
+ goto out;
+
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) {
@@ -302,7 +307,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
sk_reset_timer(sk, timer,
- jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
+ jiffies + timeout);
spin_unlock_bh(&msk->pm.lock);
@@ -344,6 +349,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
struct net *net = sock_net(sk);
+ unsigned int timeout;
lockdep_assert_held(&msk->pm.lock);
@@ -353,9 +359,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
return false;
- sk_reset_timer(sk, &add_entry->add_timer,
- jiffies + mptcp_get_add_addr_timeout(net));
- return true;
+ goto reset_timer;
}
add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
@@ -369,8 +373,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry->retrans_times = 0;
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
- sk_reset_timer(sk, &add_entry->add_timer,
- jiffies + mptcp_get_add_addr_timeout(net));
+reset_timer:
+ timeout = mptcp_get_add_addr_timeout(net);
+ if (timeout)
+ sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);
return true;
}
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index d39e7c178460..667803d72b64 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -1085,7 +1085,6 @@ static void __flush_addrs(struct list_head *list)
static void __reset_counters(struct pm_nl_pernet *pernet)
{
WRITE_ONCE(pernet->add_addr_signal_max, 0);
- WRITE_ONCE(pernet->add_addr_accept_max, 0);
WRITE_ONCE(pernet->local_addr_max, 0);
pernet->addrs = 0;
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index dbcfb948c867..32bacfc314c2 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1750,7 +1750,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
- u32 idx;
+ u32 idx, tin;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1760,6 +1760,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
}
+ tin = (u32)(b - q->tins);
idx--;
flow = &b->flows[idx];
@@ -1927,13 +1928,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) {
+ bool same_flow = false;
u32 dropped = 0;
+ u32 drop_id;
while (q->buffer_used > q->buffer_limit) {
dropped++;
- cake_drop(sch, to_free);
+ drop_id = cake_drop(sch, to_free);
+
+ if ((drop_id >> 16) == tin &&
+ (drop_id & 0xFFFF) == idx)
+ same_flow = true;
}
b->drop_overlimit += dropped;
+
+ if (same_flow)
+ return NET_XMIT_CN;
}
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index c93761040c6e..fa0314679e43 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -101,9 +101,9 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen, dropped = 0;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
@@ -142,15 +142,17 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->params.ecn,
!!nla_get_u32(tb[TCA_CODEL_ECN]));
- qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
- dropped += qdisc_pkt_len(skb);
- qdisc_qstats_backlog_dec(sch, skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c
index 845375ebd4ea..4b975feb52b1 100644
--- a/net/sched/sch_dualpi2.c
+++ b/net/sched/sch_dualpi2.c
@@ -927,7 +927,8 @@ static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
q->sch = sch;
dualpi2_reset_default(sch);
- hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_SOFT);
if (opt && nla_len(opt)) {
err = dualpi2_change(sch, opt, extack);
@@ -937,7 +938,7 @@ static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
}
hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
- HRTIMER_MODE_ABS_PINNED);
+ HRTIMER_MODE_ABS_PINNED_SOFT);
return 0;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 902ff5470607..fee922da2f99 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1013,11 +1013,11 @@ static int fq_load_priomap(struct fq_sched_data *q,
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
- int err, drop_count = 0;
- unsigned drop_len = 0;
u32 fq_log;
+ int err;
err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
NULL);
@@ -1135,16 +1135,18 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
err = fq_resize(sch, fq_log);
sch_tree_lock(sch);
}
+
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
if (!skb)
break;
- drop_len += qdisc_pkt_len(skb);
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
- drop_count++;
}
- qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return err;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 2a0f3a513bfa..a14142392939 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -366,6 +366,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
u32 quantum = 0;
@@ -443,13 +444,14 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
q->memory_usage > q->memory_limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
- q->cstats.drop_len += qdisc_pkt_len(skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
- q->cstats.drop_count++;
}
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
- q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index b0e34daf1f75..7b96bc3ff891 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -287,10 +287,9 @@ begin:
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
- unsigned int len_dropped = 0;
- unsigned int num_dropped = 0;
int err;
err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
@@ -368,11 +367,14 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
- len_dropped += qdisc_pkt_len(skb);
- num_dropped += 1;
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
}
- qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 5aa434b46707..2d4855e28a28 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -508,9 +508,9 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -561,15 +561,17 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
usecs_to_jiffies(us));
}
- qlen = sch->q.qlen;
- prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c968ea763774..b5e40c51655a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->leaf.q);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index ad46ee3ed5a9..0a377313b6a9 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -141,9 +141,9 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
static int pie_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen, dropped = 0;
int err;
err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
@@ -193,15 +193,17 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));
/* Drop excess packets if new limit is lower */
- qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
- dropped += qdisc_pkt_len(skb);
- qdisc_qstats_backlog_dec(sch, skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 9311c38f7abe..e0e48f24cd61 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2568,8 +2568,9 @@ static void smc_listen_work(struct work_struct *work)
goto out_decl;
}
- smc_listen_out_connected(new_smc);
SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
+ /* smc_listen_out() will release smcsk */
+ smc_listen_out_connected(new_smc);
goto out_free;
out_unlock:
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 51c98a007dda..bac65d0d4e3e 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1808,6 +1808,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
+/* All records returned from a recvmsg() call must have the same type.
+ * 0 is not a valid content type. Use it as "no type reported, yet".
+ */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
u8 *control)
{
@@ -2051,8 +2054,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (err < 0)
goto end;
+ /* process_rx_list() will set @control if it processed any records */
copied = err;
- if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
+ if (len <= copied || rx_more ||
+ (control && control != TLS_RECORD_TYPE_DATA))
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);