Diffstat (limited to 'include/net')
-rw-r--r--  include/net/af_vsock.h                   2
-rw-r--r--  include/net/bluetooth/hci.h             22
-rw-r--r--  include/net/bluetooth/hci_core.h       158
-rw-r--r--  include/net/bluetooth/hci_sync.h         3
-rw-r--r--  include/net/bluetooth/l2cap.h            3
-rw-r--r--  include/net/cfg80211.h                   5
-rw-r--r--  include/net/checksum.h                   2
-rw-r--r--  include/net/ipv6.h                       9
-rw-r--r--  include/net/mac80211.h                  26
-rw-r--r--  include/net/mana/gdma.h                 11
-rw-r--r--  include/net/netdev_queues.h              6
-rw-r--r--  include/net/netfilter/nf_conntrack.h    15
-rw-r--r--  include/net/netfilter/nf_flow_table.h    2
-rw-r--r--  include/net/netfilter/nf_tables.h        4
-rw-r--r--  include/net/netfilter/nft_fib.h         30
-rw-r--r--  include/net/page_pool/types.h            6
-rw-r--r--  include/net/sch_generic.h               15
-rw-r--r--  include/net/sctp/structs.h               3
-rw-r--r--  include/net/sock.h                      47
-rw-r--r--  include/net/xdp_sock.h                   3
-rw-r--r--  include/net/xfrm.h                       1
-rw-r--r--  include/net/xsk_buff_pool.h              2
22 files changed, 232 insertions, 143 deletions
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9e85424c8343..70302c92d329 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -242,8 +242,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-#ifdef CONFIG_BPF_SYSCALL
extern struct proto vsock_proto;
+#ifdef CONFIG_BPF_SYSCALL
int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void __init vsock_bpf_build_proto(void);
#else
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5bb4eaa52e14..40fce4193cc1 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -353,6 +353,22 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+
+ /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_VOICE_SETTING,
+
+ /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE,
};
/* HCI device flags */
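Note: the two HCI_QUIRK_BROKEN_* entries added above follow the usual quirk pattern, where a transport driver that recognizes an affected CSR clone sets the bits on hdev->quirks before registering the device. A minimal hedged sketch (the function name is illustrative):

static int example_register_csr_clone(struct hci_dev *hdev)
{
	/* Skip the commands these clones advertise but do not implement. */
	set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
	set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);

	return hci_register_dev(hdev);
}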
@@ -683,7 +699,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
-#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -1897,6 +1913,8 @@ struct hci_cp_le_pa_create_sync {
__u8 sync_cte_type;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045
+
#define HCI_OP_LE_PA_TERM_SYNC 0x2046
struct hci_cp_le_pa_term_sync {
__le16 handle;
@@ -2796,7 +2814,7 @@ struct hci_evt_le_create_big_complete {
__le16 bis_handle[];
} __packed;
-#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
+#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
struct hci_evt_le_big_sync_estabilished {
__u8 status;
__u8 handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ba7b52584770..3d1d7296aed9 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -29,6 +29,7 @@
#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
+#include <linux/srcu.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sync.h>
@@ -338,6 +339,7 @@ struct adv_monitor {
struct hci_dev {
struct list_head list;
+ struct srcu_struct srcu;
struct mutex lock;
struct ida unset_handle_ida;
@@ -538,6 +540,7 @@ struct hci_dev {
struct hci_conn_hash conn_hash;
struct list_head mesh_pending;
+ struct mutex mgmt_pending_lock;
struct list_head mgmt_pending;
struct list_head reject_list;
struct list_head accept_list;
@@ -804,6 +807,7 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
+extern struct mutex hci_cb_list_lock;
#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
@@ -813,20 +817,20 @@ extern rwlock_t hci_dev_list_lock;
#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
-#define hci_dev_clear_volatile_flags(hdev) \
- do { \
- hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
- hci_dev_clear_flag(hdev, HCI_LE_ADV); \
- hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
- hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
- hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag((hdev), HCI_LE_SCAN); \
+ hci_dev_clear_flag((hdev), HCI_LE_ADV); \
+ hci_dev_clear_flag((hdev), HCI_LL_RPA_RESOLUTION); \
+ hci_dev_clear_flag((hdev), HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag((hdev), HCI_QUALITY_REPORT); \
} while (0)
#define hci_dev_le_state_simultaneous(hdev) \
- (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \
- (hdev->le_states[4] & 0x08) && /* Central */ \
- (hdev->le_states[4] & 0x40) && /* Peripheral */ \
- (hdev->le_states[3] & 0x10)) /* Simultaneous */
+ (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &(hdev)->quirks) && \
+ ((hdev)->le_states[4] & 0x08) && /* Central */ \
+ ((hdev)->le_states[4] & 0x40) && /* Peripheral */ \
+ ((hdev)->le_states[3] & 0x10)) /* Simultaneous */
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
@@ -1104,10 +1108,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
return NULL;
}
-static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
- __u8 sid,
- bdaddr_t *dst,
- __u8 dst_type)
+static inline struct hci_conn *
+hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1115,8 +1117,10 @@ static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || bacmp(&c->dst, dst) ||
- c->dst_type != dst_type || c->sid != sid)
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
continue;
rcu_read_unlock();
@@ -1505,8 +1509,6 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
int hci_le_create_cis_pending(struct hci_dev *hdev);
-int hci_pa_create_sync_pending(struct hci_dev *hdev);
-int hci_le_big_create_sync_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -1547,9 +1549,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 data_len, __u8 *data);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1920,6 +1922,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
((dev)->commands[20] & 0x10 && \
!test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+#define read_voice_setting_capable(dev) \
+ ((dev)->commands[9] & 0x04 && \
+ !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks))
+
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
@@ -2006,47 +2012,24 @@ struct hci_cb {
char *name;
- bool (*match) (struct hci_conn *conn);
void (*connect_cfm) (struct hci_conn *conn, __u8 status);
void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
void (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
+ __u8 encrypt);
void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
-static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
-{
- struct hci_cb *cb, *cpy;
-
- rcu_read_lock();
- list_for_each_entry_rcu(cb, &hci_cb_list, list) {
- if (cb->match && cb->match(conn)) {
- cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
- if (!cpy)
- break;
-
- *cpy = *cb;
- INIT_LIST_HEAD(&cpy->list);
- list_add_rcu(&cpy->list, list);
- }
- }
- rcu_read_unlock();
-}
-
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head list;
- struct hci_cb *cb, *tmp;
-
- INIT_LIST_HEAD(&list);
- hci_cb_lookup(conn, &list);
+ struct hci_cb *cb;
- list_for_each_entry_safe(cb, tmp, &list, list) {
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->connect_cfm)
cb->connect_cfm(conn, status);
- kfree(cb);
}
+ mutex_unlock(&hci_cb_list_lock);
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
@@ -2054,43 +2037,22 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- struct list_head list;
- struct hci_cb *cb, *tmp;
-
- INIT_LIST_HEAD(&list);
- hci_cb_lookup(conn, &list);
+ struct hci_cb *cb;
- list_for_each_entry_safe(cb, tmp, &list, list) {
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->disconn_cfm)
cb->disconn_cfm(conn, reason);
- kfree(cb);
}
+ mutex_unlock(&hci_cb_list_lock);
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
}
-static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
-{
- struct list_head list;
- struct hci_cb *cb, *tmp;
-
- INIT_LIST_HEAD(&list);
- hci_cb_lookup(conn, &list);
-
- list_for_each_entry_safe(cb, tmp, &list, list) {
- if (cb->security_cfm)
- cb->security_cfm(conn, status, encrypt);
- kfree(cb);
- }
-
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
-}
-
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
+ struct hci_cb *cb;
__u8 encrypt;
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
@@ -2098,11 +2060,20 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- hci_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
+ struct hci_cb *cb;
__u8 encrypt;
if (conn->state == BT_CONFIG) {
@@ -2129,38 +2100,40 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
conn->sec_level = conn->pending_sec_level;
}
- hci_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
+ }
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
- struct list_head list;
- struct hci_cb *cb, *tmp;
-
- INIT_LIST_HEAD(&list);
- hci_cb_lookup(conn, &list);
+ struct hci_cb *cb;
- list_for_each_entry_safe(cb, tmp, &list, list) {
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
- kfree(cb);
}
+ mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
__u8 role)
{
- struct list_head list;
- struct hci_cb *cb, *tmp;
-
- INIT_LIST_HEAD(&list);
- hci_cb_lookup(conn, &list);
+ struct hci_cb *cb;
- list_for_each_entry_safe(cb, tmp, &list, list) {
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
- kfree(cb);
}
+ mutex_unlock(&hci_cb_list_lock);
}
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
@@ -2409,7 +2382,6 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
-void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
bdaddr_t *bdaddr, u8 addr_type);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index f3052cb252ef..dbabc17b30cd 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -186,3 +186,6 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params);
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index d9c767cf773d..9189354c568f 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -668,7 +668,7 @@ struct l2cap_conn {
struct l2cap_chan *smp;
struct list_head chan_l;
- struct mutex chan_lock;
+ struct mutex lock;
struct kref ref;
struct list_head users;
};
@@ -970,6 +970,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 941dc62f3027..bb1862536f9c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -127,6 +127,8 @@ struct wiphy;
* even if it is otherwise disabled.
* @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
* with very low power (VLP), even if otherwise set to NO_IR.
+ * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
+ * even if otherwise set to NO_IR.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = BIT(0),
@@ -155,6 +157,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
IEEE80211_CHAN_CAN_MONITOR = BIT(24),
IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
+ IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -2707,7 +2710,7 @@ struct cfg80211_scan_request {
s8 tsf_report_link_id;
/* keep last */
- struct ieee80211_channel *channels[] __counted_by(n_channels);
+ struct ieee80211_channel *channels[];
};
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 1338cb92c8e7..28b101f26636 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -158,7 +158,7 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
- __wsum diff, bool pseudohdr);
+ __wsum diff, bool pseudohdr, bool ipv6);
static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 248bfb26e2af..6d52b5584d2f 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -363,15 +363,6 @@ struct ipcm6_cookie {
struct ipv6_txoptions *opt;
};
-static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
-{
- *ipc6 = (struct ipcm6_cookie) {
- .hlimit = -1,
- .tclass = -1,
- .dontfrag = -1,
- };
-}
-
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
const struct sock *sk)
{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b712582f9a9..8e7094160206 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
#ifndef MAC80211_H
@@ -2826,6 +2826,11 @@ struct ieee80211_txq {
* implements MLO, so operation can continue on other links when one
* link is switching.
*
+ * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec
+ * but otherwise ignored/worked around for interoperability. This is a
+ * HW flag so drivers can opt in according to their own control, e.g. in
+ * testing.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2885,6 +2890,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DISALLOW_PUNCTURING,
IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
IEEE80211_HW_HANDLES_QUIET_CSA,
+ IEEE80211_HW_STRICT,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3797,7 +3803,7 @@ enum ieee80211_reconfig_type {
* @was_assoc: set if this call is due to deauth/disassoc
* while just having been associated
* @link_id: the link id on which the frame will be TX'ed.
- * Only used with the mgd_prepare_tx() method.
+ * 0 for a non-MLO connection.
*/
struct ieee80211_prep_tx_info {
u16 duration;
@@ -5306,22 +5312,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
int max_rates);
/**
- * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
- *
- * Call this function to notify mac80211 about a change in expected throughput
- * to a station. A driver for a device that does rate control in firmware can
- * call this function when the expected throughput estimate towards a station
- * changes. The information is used to tune the CoDel AQM applied to traffic
- * going towards that station (which can otherwise be too aggressive and cause
- * slow stations to starve).
- *
- * @pubsta: the station to set throughput for.
- * @thr: the current expected throughput in kbps.
- */
-void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
- u32 thr);
-
-/**
* ieee80211_tx_rate_update - transmit rate update callback
*
* Drivers should call this functions with a non-NULL pub sta
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index de47fa533b15..6a0e83ac0fdb 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -406,8 +406,6 @@ struct gdma_context {
struct gdma_dev mana_ib;
};
-#define MAX_NUM_GDMA_DEVICES 4
-
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
return gd->dev_id.type == GDMA_DEVICE_MANA;
@@ -554,11 +552,15 @@ enum {
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+/* Driver can handle holes (zeros) in the device list */
+#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
- GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)
+ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
#define GDMA_DRV_CAP_FLAGS2 0
@@ -619,11 +621,12 @@ struct gdma_query_max_resources_resp {
}; /* HW DATA */
/* GDMA_LIST_DEVICES */
+#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
struct gdma_resp_hdr hdr;
u32 num_of_devs;
u32 reserved;
- struct gdma_dev_id devs[64];
+ struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */
/* GDMA_REGISTER_DEVICE */
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 5ca019d294ca..173bcfcd868a 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -92,6 +92,12 @@ struct netdev_stat_ops {
struct netdev_queue_stats_tx *tx);
};
+void netdev_stat_queue_sum(struct net_device *netdev,
+ int rx_start, int rx_end,
+ struct netdev_queue_stats_rx *rx_sum,
+ int tx_start, int tx_end,
+ struct netdev_queue_stats_tx *tx_sum);
+
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
*
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cba3ccf03fcc..8cb70e7485e2 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -308,8 +308,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
/* use after obtaining a reference count */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
- return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
- !nf_ct_is_dying(ct);
+ if (!nf_ct_is_confirmed(ct))
+ return false;
+
+ /* load ct->timeout after is_confirmed() test.
+ * Pairs with __nf_conntrack_confirm() which:
+ * 1. Increases ct->timeout value
+ * 2. Inserts ct into rcu hlist
+ * 3. Sets the confirmed bit
+ * 4. Unlocks the hlist lock
+ */
+ smp_acquire__after_ctrl_dep();
+
+ return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
}
#define NF_CT_DAY (86400 * HZ)
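Note: as the comment above the function says, the helper is meant to run on an entry the caller already holds a reference to. A hedged sketch of that usage (the surrounding loop and naming are illustrative, not the actual gc worker):

static void example_reap_if_expired(struct nf_conn *ct)
{
	/* Only inspect the entry once we own a reference. */
	if (!refcount_inc_not_zero(&ct->ct_general.use))
		return;

	if (nf_ct_should_gc(ct))
		nf_ct_kill(ct);

	nf_ct_put(ct);
}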
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index b63d53bb9dd6..1a6fca013165 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -369,7 +369,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
- if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN))
return false;
*inner_proto = __nf_flow_pppoe_proto(skb);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 788513cc384b..757abcb54d11 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1889,7 +1889,7 @@ void nft_chain_filter_fini(void);
void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
-void nf_tables_trans_destroy_flush_work(void);
+void nf_tables_trans_destroy_flush_work(struct net *net);
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);
@@ -1903,6 +1903,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
struct nftables_pernet {
struct list_head tables;
struct list_head commit_list;
+ struct list_head destroy_list;
struct list_head commit_set_list;
struct list_head binding_list;
struct list_head module_list;
@@ -1913,6 +1914,7 @@ struct nftables_pernet {
unsigned int base_seq;
unsigned int gc_seq;
u8 validate_state;
+ struct work_struct destroy_work;
};
extern unsigned int nf_tables_net_id;
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 38cae7113de4..7370fba844ef 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -2,6 +2,7 @@
#ifndef _NFT_FIB_H_
#define _NFT_FIB_H_
+#include <net/l3mdev.h>
#include <net/netfilter/nf_tables.h>
struct nft_fib {
@@ -18,6 +19,35 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
+static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt)
+{
+ const struct net_device *indev = nft_in(pkt);
+ const struct sock *sk;
+
+ switch (nft_hook(pkt)) {
+ case NF_INET_PRE_ROUTING:
+ case NF_INET_INGRESS:
+ case NF_INET_LOCAL_IN:
+ break;
+ default:
+ return false;
+ }
+
+ sk = pkt->skb->sk;
+ if (sk && sk_fullsock(sk))
+ return sk->sk_rx_dst_ifindex == indev->ifindex;
+
+ return nft_fib_is_loopback(pkt->skb, indev);
+}
+
+static inline int nft_fib_l3mdev_master_ifindex_rcu(const struct nft_pktinfo *pkt,
+ const struct net_device *iif)
+{
+ const struct net_device *dev = iif ? iif : pkt->skb->dev;
+
+ return l3mdev_master_ifindex_rcu(dev);
+}
+
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
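Note: a hedged sketch of how a fib expression's eval path can use the new nft_fib_can_skip() helper to short-circuit the lookup for local and loopback traffic; the register handling shown is illustrative only:

static void example_fib_eval(const struct nft_expr *expr, struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_fib *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];

	if (nft_fib_can_skip(pkt)) {
		nft_fib_store_result(dest, priv, nft_in(pkt));
		return;
	}

	/* ... otherwise fall through to the real FIB lookup ... */
}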
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index c022c410abe3..f53e2c90b686 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -6,6 +6,7 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <net/netmem.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
@@ -33,6 +34,9 @@
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+/* Index limit to stay within PP_DMA_INDEX_BITS for DMA indices */
+#define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1)
+
/*
* Fast allocation side cache array/stack
*
@@ -216,6 +220,8 @@ struct page_pool {
void *mp_priv;
+ struct xarray dma_mapped;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
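Note: the new dma_mapped xarray lets the pool track DMA-mapped pages under IDs bounded by PP_DMA_INDEX_LIMIT, so stale mappings can still be torn down later. A hedged sketch of reserving an index (assumes the xarray was initialized for allocation; names and the GFP choice are illustrative):

static int example_record_dma_mapping(struct page_pool *pool, struct page *page)
{
	u32 id;

	/* Reserve a non-zero index for this mapping; 0 stays unused so it
	 * can mean "no DMA index recorded".
	 */
	return xa_alloc(&pool->dma_mapped, &id, page,
			PP_DMA_INDEX_LIMIT, GFP_ATOMIC);
}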
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 24e48af7e8f7..a9d7e9ecee6b 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1031,6 +1031,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
return skb;
}
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&sch->gso_skb);
+ if (skb) {
+ sch->q.qlen--;
+ return skb;
+ }
+ if (direct)
+ return __qdisc_dequeue_head(&sch->q);
+ else
+ return sch->dequeue(sch);
+}
+
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
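Note: qdisc_dequeue_internal() drains any packet parked on gso_skb (requeued after a failed transmit) before touching the real queue, which makes it suitable for paths such as a ->change() handler that must shrink the queue to a new limit. A hedged fragment of that usage (q->limit is illustrative, and backlog accounting via qdisc_tree_reduce_backlog() is elided for brevity):

	while (sch->q.qlen > q->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);

		if (!skb)
			break;

		rtnl_kfree_skbs(skb, skb);
	}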
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 31248cfdfb23..dcd288fa1bb6 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,6 +775,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
+ __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -784,7 +785,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
- __u32 rto_pending:1,
+ rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending
diff --git a/include/net/sock.h b/include/net/sock.h
index fa055cf1785e..b7270b6b9e9c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -338,6 +338,8 @@ struct sk_filter;
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
* @sk_user_frags: xarray of pages the user is holding a reference on.
+ * @sk_owner: reference to the real owner of the socket that calls
+ * sock_lock_init_class_and_name().
*/
struct sock {
/*
@@ -544,6 +546,10 @@ struct sock {
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
struct xarray sk_user_frags;
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+ struct module *sk_owner;
+#endif
};
struct sock_bh_locked {
@@ -1585,6 +1591,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+ __module_get(owner);
+ sk->sk_owner = owner;
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+ sk->sk_owner = NULL;
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+ module_put(sk->sk_owner);
+}
+#else
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+}
+#endif
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -1594,13 +1629,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
+ sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
- sizeof((sk)->sk_lock)); \
+ sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
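Note: the sk_owner hooks pin the module that supplies the lock class strings and keys, so lockdep never dereferences memory from an already-unloaded module. Callers of the macro are unchanged; a hedged sketch of a protocol module using it (all names here are illustrative):

static struct lock_class_key example_slock_key;
static struct lock_class_key example_sk_lock_key;

static void example_sock_lock_init(struct sock *sk)
{
	/* THIS_MODULE is now pinned via sk_owner_set() until the socket dies. */
	sock_lock_init_class_and_name(sk, "slock-AF_EXAMPLE",
				      &example_slock_key,
				      "sk_lock-AF_EXAMPLE",
				      &example_sk_lock_key);
}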
@@ -2906,8 +2942,11 @@ int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
static inline bool sk_is_readable(struct sock *sk)
{
- if (sk->sk_prot->sock_is_readable)
- return sk->sk_prot->sock_is_readable(sk);
+ const struct proto *prot = READ_ONCE(sk->sk_prot);
+
+ if (prot->sock_is_readable)
+ return prot->sock_is_readable(sk);
+
return false;
}
#endif /* _SOCK_H */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index bfe625b55d55..df3f5f07bc7c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -71,9 +71,6 @@ struct xdp_sock {
*/
u32 tx_budget_spent;
- /* Protects generic receive. */
- spinlock_t rx_lock;
-
/* Statistics */
u64 rx_dropped;
u64 rx_queue_full;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 83e9ef25b8d0..1484dd15a369 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -233,7 +233,6 @@ struct xfrm_state {
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
- struct sock __rcu *encap_sk;
/* NAT keepalive */
u32 nat_keepalive_interval; /* seconds */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index bacb33f1e3e5..823fd5c7a3b1 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -55,6 +55,8 @@ struct xsk_buff_pool {
refcount_t users;
struct xdp_umem *umem;
struct work_struct work;
+ /* Protects generic receive in shared and non-shared umem mode. */
+ spinlock_t rx_lock;
struct list_head free_list;
struct list_head xskb_list;
u32 heads_cnt;