Diffstat (limited to 'include/net')
-rw-r--r--  include/net/act_api.h | 10
-rw-r--r--  include/net/amt.h | 385
-rw-r--r--  include/net/ax25.h | 13
-rw-r--r--  include/net/bluetooth/bluetooth.h | 90
-rw-r--r--  include/net/bluetooth/hci.h | 117
-rw-r--r--  include/net/bluetooth/hci_core.h | 75
-rw-r--r--  include/net/busy_poll.h | 3
-rw-r--r--  include/net/cfg80211.h | 79
-rw-r--r--  include/net/codel.h | 5
-rw-r--r--  include/net/codel_impl.h | 18
-rw-r--r--  include/net/datalink.h | 2
-rw-r--r--  include/net/devlink.h | 128
-rw-r--r--  include/net/dn.h | 2
-rw-r--r--  include/net/dsa.h | 46
-rw-r--r--  include/net/flow_dissector.h | 1
-rw-r--r--  include/net/gen_stats.h | 59
-rw-r--r--  include/net/inet_connection_sock.h | 2
-rw-r--r--  include/net/inet_ecn.h | 17
-rw-r--r--  include/net/inet_sock.h | 3
-rw-r--r--  include/net/ioam6.h | 3
-rw-r--r--  include/net/ip.h | 8
-rw-r--r--  include/net/ip_vs.h | 11
-rw-r--r--  include/net/ipv6.h | 1
-rw-r--r--  include/net/llc.h | 2
-rw-r--r--  include/net/llc_if.h | 3
-rw-r--r--  include/net/mac80211.h | 11
-rw-r--r--  include/net/mctp.h | 82
-rw-r--r--  include/net/mctpdevice.h | 21
-rw-r--r--  include/net/mptcp.h | 4
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/net/neighbour.h | 45
-rw-r--r--  include/net/netfilter/nf_tables.h | 10
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 7
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 6
-rw-r--r--  include/net/netfilter/xt_rateest.h | 2
-rw-r--r--  include/net/page_pool.h | 12
-rw-r--r--  include/net/pkt_cls.h | 6
-rw-r--r--  include/net/rose.h | 8
-rw-r--r--  include/net/sch_generic.h | 86
-rw-r--r--  include/net/sctp/sctp.h | 7
-rw-r--r--  include/net/sock.h | 129
-rw-r--r--  include/net/switchdev.h | 48
-rw-r--r--  include/net/tcp.h | 52
-rw-r--r--  include/net/tls.h | 5
-rw-r--r--  include/net/xdp.h | 8
-rw-r--r--  include/net/xdp_sock_drv.h | 22
-rw-r--r--  include/net/xsk_buff_pool.h | 48
47 files changed, 1276 insertions, 428 deletions
diff --git a/include/net/act_api.h b/include/net/act_api.h
index f19f7f4a463c..b5b624c7e488 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -30,13 +30,13 @@ struct tc_action {
atomic_t tcfa_bindcnt;
int tcfa_action;
struct tcf_t tcfa_tm;
- struct gnet_stats_basic_packed tcfa_bstats;
- struct gnet_stats_basic_packed tcfa_bstats_hw;
+ struct gnet_stats_basic_sync tcfa_bstats;
+ struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
{
if (likely(a->cpu_bstats)) {
- bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return;
}
spin_lock(&a->tcfa_lock);
diff --git a/include/net/amt.h b/include/net/amt.h
new file mode 100644
index 000000000000..7a4db8b903ee
--- /dev/null
+++ b/include/net/amt.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com>
+ */
+#ifndef _NET_AMT_H_
+#define _NET_AMT_H_
+
+#include <linux/siphash.h>
+#include <linux/jhash.h>
+
+enum amt_msg_type {
+ AMT_MSG_DISCOVERY = 1,
+ AMT_MSG_ADVERTISEMENT,
+ AMT_MSG_REQUEST,
+ AMT_MSG_MEMBERSHIP_QUERY,
+ AMT_MSG_MEMBERSHIP_UPDATE,
+ AMT_MSG_MULTICAST_DATA,
+ AMT_MSG_TEARDOWM,
+ __AMT_MSG_MAX,
+};
+
+#define AMT_MSG_MAX (__AMT_MSG_MAX - 1)
+
+enum amt_ops {
+ /* A*B */
+ AMT_OPS_INT,
+ /* A+B */
+ AMT_OPS_UNI,
+ /* A-B */
+ AMT_OPS_SUB,
+ /* B-A */
+ AMT_OPS_SUB_REV,
+ __AMT_OPS_MAX,
+};
+
+#define AMT_OPS_MAX (__AMT_OPS_MAX - 1)
+
+enum amt_filter {
+ AMT_FILTER_FWD,
+ AMT_FILTER_D_FWD,
+ AMT_FILTER_FWD_NEW,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_FILTER_ALL,
+ AMT_FILTER_NONE_NEW,
+ AMT_FILTER_BOTH,
+ AMT_FILTER_BOTH_NEW,
+ __AMT_FILTER_MAX,
+};
+
+#define AMT_FILTER_MAX (__AMT_FILTER_MAX - 1)
+
+enum amt_act {
+ AMT_ACT_GMI,
+ AMT_ACT_GMI_ZERO,
+ AMT_ACT_GT,
+ AMT_ACT_STATUS_FWD_NEW,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ AMT_ACT_STATUS_NONE_NEW,
+ __AMT_ACT_MAX,
+};
+
+#define AMT_ACT_MAX (__AMT_ACT_MAX - 1)
+
+enum amt_status {
+ AMT_STATUS_INIT,
+ AMT_STATUS_SENT_DISCOVERY,
+ AMT_STATUS_RECEIVED_DISCOVERY,
+ AMT_STATUS_SENT_ADVERTISEMENT,
+ AMT_STATUS_RECEIVED_ADVERTISEMENT,
+ AMT_STATUS_SENT_REQUEST,
+ AMT_STATUS_RECEIVED_REQUEST,
+ AMT_STATUS_SENT_QUERY,
+ AMT_STATUS_RECEIVED_QUERY,
+ AMT_STATUS_SENT_UPDATE,
+ AMT_STATUS_RECEIVED_UPDATE,
+ __AMT_STATUS_MAX,
+};
+
+#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+
+struct amt_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 type:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 version:4,
+ type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_header_discovery {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_advertisement {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved:24;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ reserved:24;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+ __be32 ip4;
+} __packed;
+
+struct amt_header_request {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 type:4,
+ version:4,
+ reserved1:7,
+ p:1,
+ reserved2:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 version:4,
+ type:4,
+ p:1,
+ reserved1:7,
+ reserved2:16;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_query {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:6,
+ l:1,
+ g:1,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ g:1,
+ l:1,
+ reserved:6,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_membership_update {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 type:4,
+ version:4,
+ reserved:8,
+ response_mac:48;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u64 version:4,
+ type:4,
+ reserved:8,
+ response_mac:48;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 nonce;
+} __packed;
+
+struct amt_header_mcast_data {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type:4,
+ version:4,
+ reserved:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 version:4,
+ type:4,
+ reserved:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct amt_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_advertisement advertisement;
+ struct amt_header_request request;
+ struct amt_header_membership_query query;
+ struct amt_header_membership_update update;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_gw_headers {
+ union {
+ struct amt_header_discovery discovery;
+ struct amt_header_request request;
+ struct amt_header_membership_update update;
+ };
+} __packed;
+
+struct amt_relay_headers {
+ union {
+ struct amt_header_advertisement advertisement;
+ struct amt_header_membership_query query;
+ struct amt_header_mcast_data data;
+ };
+} __packed;
+
+struct amt_skb_cb {
+ struct amt_tunnel_list *tunnel;
+};
+
+struct amt_tunnel_list {
+ struct list_head list;
+ /* Protect All resources under an amt_tunne_list */
+ spinlock_t lock;
+ struct amt_dev *amt;
+ u32 nr_groups;
+ u32 nr_sources;
+ enum amt_status status;
+ struct delayed_work gc_wq;
+ __be16 source_port;
+ __be32 ip4;
+ __be32 nonce;
+ siphash_key_t key;
+ u64 mac:48,
+ reserved:16;
+ struct rcu_head rcu;
+ struct hlist_head groups[];
+};
+
+union amt_addr {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+};
+
+/* RFC 3810
+ *
+ * When the router is in EXCLUDE mode, the router state is represented
+ * by the notation EXCLUDE (X,Y), where X is called the "Requested List"
+ * and Y is called the "Exclude List". All sources, except those from
+ * the Exclude List, will be forwarded by the router
+ */
+enum amt_source_status {
+ AMT_SOURCE_STATUS_NONE,
+ /* Node of Requested List */
+ AMT_SOURCE_STATUS_FWD,
+ /* Node of Exclude List */
+ AMT_SOURCE_STATUS_D_FWD,
+};
+
+/* protected by gnode->lock */
+struct amt_source_node {
+ struct hlist_node node;
+ struct amt_group_node *gnode;
+ struct delayed_work source_timer;
+ union amt_addr source_addr;
+ enum amt_source_status status;
+#define AMT_SOURCE_OLD 0
+#define AMT_SOURCE_NEW 1
+ u8 flags;
+ struct rcu_head rcu;
+};
+
+/* Protected by amt_tunnel_list->lock */
+struct amt_group_node {
+ struct amt_dev *amt;
+ union amt_addr group_addr;
+ union amt_addr host_addr;
+ bool v6;
+ u8 filter_mode;
+ u32 nr_sources;
+ struct amt_tunnel_list *tunnel_list;
+ struct hlist_node node;
+ struct delayed_work group_timer;
+ struct rcu_head rcu;
+ struct hlist_head sources[];
+};
+
+struct amt_dev {
+ struct net_device *dev;
+ struct net_device *stream_dev;
+ struct net *net;
+ /* Global lock for amt device */
+ spinlock_t lock;
+ /* Used only in relay mode */
+ struct list_head tunnel_list;
+ struct gro_cells gro_cells;
+
+ /* Protected by RTNL */
+ struct delayed_work discovery_wq;
+ /* Protected by RTNL */
+ struct delayed_work req_wq;
+ /* Protected by RTNL */
+ struct delayed_work secret_wq;
+ /* AMT status */
+ enum amt_status status;
+ /* Generated key */
+ siphash_key_t key;
+ struct socket __rcu *sock;
+ u32 max_groups;
+ u32 max_sources;
+ u32 hash_buckets;
+ u32 hash_seed;
+ /* Default 128 */
+ u32 max_tunnels;
+ /* Default 128 */
+ u32 nr_tunnels;
+ /* Gateway or Relay mode */
+ u32 mode;
+ /* Default 2268 */
+ __be16 relay_port;
+ /* Default 2268 */
+ __be16 gw_port;
+ /* Outer local ip */
+ __be32 local_ip;
+ /* Outer remote ip */
+ __be32 remote_ip;
+ /* Outer discovery ip */
+ __be32 discovery_ip;
+ /* Only used in gateway mode */
+ __be32 nonce;
+ /* Gateway sent request and received query */
+ bool ready4;
+ bool ready6;
+ u8 req_cnt;
+ u8 qi;
+ u64 qrv;
+ u64 qri;
+ /* Used only in gateway mode */
+ u64 mac:48,
+ reserved:16;
+};
+
+#define AMT_TOS 0xc0
+#define AMT_IPHDR_OPTS 4
+#define AMT_IP6HDR_OPTS 8
+#define AMT_GC_INTERVAL (30 * 1000)
+#define AMT_MAX_GROUP 32
+#define AMT_MAX_SOURCE 128
+#define AMT_HSIZE_SHIFT 8
+#define AMT_HSIZE (1 << AMT_HSIZE_SHIFT)
+
+#define AMT_DISCOVERY_TIMEOUT 5000
+#define AMT_INIT_REQ_TIMEOUT 1
+#define AMT_INIT_QUERY_INTERVAL 125
+#define AMT_MAX_REQ_TIMEOUT 120
+#define AMT_MAX_REQ_COUNT 3
+#define AMT_SECRET_TIMEOUT 60000
+#define IANA_AMT_UDP_PORT 2268
+#define AMT_MAX_TUNNELS 128
+#define AMT_MAX_REQS 128
+#define AMT_GW_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_gw_headers))
+#define AMT_RELAY_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ sizeof(struct amt_relay_headers))
+
+static inline bool netif_is_amt(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "amt");
+}
+
+static inline u64 amt_gmi(const struct amt_dev *amt)
+{
+ return ((amt->qrv * amt->qi) + amt->qri) * 1000;
+}
+
+#endif /* _NET_AMT_H_ */
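
Note: amt_gmi() above converts the group membership interval to milliseconds from the robustness variable (qrv), query interval (qi) and query response interval (qri). A minimal standalone sketch of the arithmetic; the input values below are the usual IGMPv3-style defaults (QRV=2, QI=125 s, QRI=10 s), which are assumptions for illustration, not taken from this header:

#include <stdint.h>
#include <stdio.h>

/* Mirrors amt_gmi(): GMI = ((QRV * QI) + QRI) * 1000, in milliseconds. */
static uint64_t gmi_ms(uint64_t qrv, uint64_t qi, uint64_t qri)
{
	return ((qrv * qi) + qri) * 1000;
}

int main(void)
{
	/* assumed IGMPv3-style defaults: QRV=2, QI=125 s, QRI=10 s */
	printf("GMI = %llu ms\n",
	       (unsigned long long)gmi_ms(2, 125, 10)); /* prints 260000 ms */
	return 0;
}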
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 8b7eb46ad72d..03d409de61ad 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -304,7 +304,7 @@ extern spinlock_t ax25_list_lock;
void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *,
struct net_device *);
void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
void ax25_destroy_socket(ax25_cb *);
@@ -384,10 +384,11 @@ struct ax25_linkfail {
void ax25_linkfail_register(struct ax25_linkfail *lf);
void ax25_linkfail_release(struct ax25_linkfail *lf);
-int __must_check ax25_listen_register(ax25_address *, struct net_device *);
-void ax25_listen_release(ax25_address *, struct net_device *);
+int __must_check ax25_listen_register(const ax25_address *,
+ struct net_device *);
+void ax25_listen_release(const ax25_address *, struct net_device *);
int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-int ax25_listen_mine(ax25_address *, struct net_device *);
+int ax25_listen_mine(const ax25_address *, struct net_device *);
void ax25_link_failed(ax25_cb *, int);
int ax25_protocol_is_registered(unsigned int);
@@ -401,8 +402,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
- ax25_digi *, struct net_device *);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *,
+ ax25_address *, ax25_digi *, struct net_device *);
void ax25_output(ax25_cb *, int, struct sk_buff *);
void ax25_kick(ax25_cb *);
void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 9125effbf448..3271870fd85e 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -153,6 +153,30 @@ struct bt_voice {
#define BT_SCM_PKT_STATUS 0x03
+#define BT_CODEC 19
+
+struct bt_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct bt_codec {
+ __u8 id;
+ __u16 cid;
+ __u16 vid;
+ __u8 data_path;
+ __u8 num_caps;
+} __packed;
+
+struct bt_codecs {
+ __u8 num_codecs;
+ struct bt_codec codecs[];
+} __packed;
+
+#define BT_CODEC_CVSD 0x02
+#define BT_CODEC_TRANSPARENT 0x03
+#define BT_CODEC_MSBC 0x05
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -420,6 +444,72 @@ out:
return NULL;
}
+/* Shall not be called with lock_sock held */
+static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb;
+ size_t size = min_t(size_t, len, mtu);
+ int err;
+
+ skb = bt_skb_send_alloc(sk, size + headroom + tailroom,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(err);
+
+ skb_reserve(skb, headroom);
+ skb_tailroom_reserve(skb, mtu, tailroom);
+
+ if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) {
+ kfree_skb(skb);
+ return ERR_PTR(-EFAULT);
+ }
+
+ skb->priority = sk->sk_priority;
+
+ return skb;
+}
+
+/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments
+ * according to the MTU.
+ */
+static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len, size_t mtu,
+ size_t headroom, size_t tailroom)
+{
+ struct sk_buff *skb, **frag;
+
+ skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR_OR_NULL(skb))
+ return skb;
+
+ len -= skb->len;
+ if (!len)
+ return skb;
+
+ /* Add remaining data over MTU as continuation fragments */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
+ if (IS_ERR(tmp)) {
+ kfree_skb(skb);
+ return tmp;
+ }
+
+ len -= tmp->len;
+
+ *frag = tmp;
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+}
+
int bt_to_errno(u16 code);
void hci_sock_set_flag(struct sock *sk, int nr);
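
Note: bt_skb_sendmmsg() above returns a single skb whose frag_list carries the continuation fragments once the message exceeds the MTU. A minimal caller sketch; the MTU and headroom values and the example_xmit() helper are hypothetical, not part of this patch:

static int example_send(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;

	/* split msg into MTU-sized fragments; a 48-byte MTU and 4 bytes
	 * of headroom are assumed values for illustration
	 */
	skb = bt_skb_sendmmsg(sk, msg, len, 48, 4, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* hand the fragment chain to the transport (hypothetical helper) */
	return example_xmit(sk, skb);
}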
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b80415011dcd..63065bc01b76 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -330,6 +330,8 @@ enum {
HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
+ HCI_QUALITY_REPORT,
+ HCI_OFFLOAD_CODECS_ENABLED,
__HCI_NUM_FLAGS,
};
@@ -871,6 +873,40 @@ struct hci_cp_logical_link_cancel {
__u8 flow_spec_id;
} __packed;
+#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+struct hci_coding_format {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+} __packed;
+
+struct hci_cp_enhanced_setup_sync_conn {
+ __le16 handle;
+ __le32 tx_bandwidth;
+ __le32 rx_bandwidth;
+ struct hci_coding_format tx_coding_format;
+ struct hci_coding_format rx_coding_format;
+ __le16 tx_codec_frame_size;
+ __le16 rx_codec_frame_size;
+ __le32 in_bandwidth;
+ __le32 out_bandwidth;
+ struct hci_coding_format in_coding_format;
+ struct hci_coding_format out_coding_format;
+ __le16 in_coded_data_size;
+ __le16 out_coded_data_size;
+ __u8 in_pcm_data_format;
+ __u8 out_pcm_data_format;
+ __u8 in_pcm_sample_payload_msb_pos;
+ __u8 out_pcm_sample_payload_msb_pos;
+ __u8 in_data_path;
+ __u8 out_data_path;
+ __u8 in_transport_unit_size;
+ __u8 out_transport_unit_size;
+ __le16 max_latency;
+ __le16 pkt_type;
+ __u8 retrans_effort;
+} __packed;
+
struct hci_rp_logical_link_cancel {
__u8 status;
__u8 phy_handle;
@@ -1250,6 +1286,14 @@ struct hci_rp_read_local_oob_ext_data {
__u8 rand256[16];
} __packed;
+#define HCI_CONFIGURE_DATA_PATH 0x0c83
+struct hci_op_configure_data_path {
+ __u8 direction;
+ __u8 data_path_id;
+ __u8 vnd_len;
+ __u8 vnd_data[];
+} __packed;
+
#define HCI_OP_READ_LOCAL_VERSION 0x1001
struct hci_rp_read_local_version {
__u8 status;
@@ -1307,6 +1351,28 @@ struct hci_rp_read_data_block_size {
} __packed;
#define HCI_OP_READ_LOCAL_CODECS 0x100b
+struct hci_std_codecs {
+ __u8 num;
+ __u8 codec[];
+} __packed;
+
+struct hci_vnd_codec {
+ /* company id */
+ __le16 cid;
+ /* vendor codec id */
+ __le16 vid;
+} __packed;
+
+struct hci_vnd_codecs {
+ __u8 num;
+ struct hci_vnd_codec codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs {
+ __u8 status;
+ struct hci_std_codecs std_codecs;
+ struct hci_vnd_codecs vnd_codecs;
+} __packed;
#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c
struct hci_rp_read_local_pairing_opts {
@@ -1315,6 +1381,54 @@ struct hci_rp_read_local_pairing_opts {
__u8 max_key_size;
} __packed;
+#define HCI_OP_READ_LOCAL_CODECS_V2 0x100d
+struct hci_std_codec_v2 {
+ __u8 id;
+ __u8 transport;
+} __packed;
+
+struct hci_std_codecs_v2 {
+ __u8 num;
+ struct hci_std_codec_v2 codec[];
+} __packed;
+
+struct hci_vnd_codec_v2 {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+} __packed;
+
+struct hci_vnd_codecs_v2 {
+ __u8 num;
+ struct hci_vnd_codec_v2 codec[];
+} __packed;
+
+struct hci_rp_read_local_supported_codecs_v2 {
+ __u8 status;
+ struct hci_std_codecs_v2 std_codecs;
+ struct hci_vnd_codecs_v2 vendor_codecs;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e
+struct hci_op_read_local_codec_caps {
+ __u8 id;
+ __le16 cid;
+ __le16 vid;
+ __u8 transport;
+ __u8 direction;
+} __packed;
+
+struct hci_codec_caps {
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+struct hci_rp_read_local_codec_caps {
+ __u8 status;
+ __u8 num_caps;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b
struct hci_rp_read_page_scan_activity {
__u8 status;
@@ -2551,6 +2665,9 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* codec transport types */
+#define HCI_TRANSPORT_SCO_ESCO 0x01
+
/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a7360c8c72f8..dd8840e70e25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -131,6 +131,17 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct codec_list {
+ struct list_head list;
+ u8 id;
+ __u16 cid;
+ __u16 vid;
+ u8 transport;
+ u8 num_caps;
+ u32 len;
+ struct hci_codec_caps caps[];
+};
+
struct bdaddr_list_with_irk {
struct list_head list;
bdaddr_t bdaddr;
@@ -536,6 +547,7 @@ struct hci_dev {
struct list_head pend_le_conns;
struct list_head pend_le_reports;
struct list_head blocked_keys;
+ struct list_head local_codecs;
struct hci_dev_stats stat;
@@ -605,7 +617,12 @@ struct hci_dev {
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
void (*cmd_timeout)(struct hci_dev *hdev);
- bool (*prevent_wake)(struct hci_dev *hdev);
+ bool (*wakeup)(struct hci_dev *hdev);
+ int (*set_quality_report)(struct hci_dev *hdev, bool enable);
+ int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
+ int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -699,6 +716,7 @@ struct hci_conn {
struct amp_mgr *amp_mgr;
struct hci_conn *link;
+ struct bt_codec codec;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
@@ -760,6 +778,7 @@ extern struct mutex hci_cb_list_lock;
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
} while (0)
/* ----- HCI interface to upper protocols ----- */
@@ -1099,13 +1118,14 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role, bdaddr_t *direct_rpa);
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role,
+ bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting);
+ __u16 setting, struct bt_codec *codec);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1360,6 +1380,8 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
+bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);
void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
@@ -1442,6 +1464,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+/* Use enhanced synchronous connection if command is supported */
+#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
+
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
@@ -1609,43 +1634,6 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
mutex_unlock(&hci_cb_list_lock);
}
-static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
- size_t *data_len)
-{
- size_t parsed = 0;
-
- if (eir_len < 2)
- return NULL;
-
- while (parsed < eir_len - 1) {
- u8 field_len = eir[0];
-
- if (field_len == 0)
- break;
-
- parsed += field_len + 1;
-
- if (parsed > eir_len)
- break;
-
- if (eir[1] != type) {
- eir += field_len + 1;
- continue;
- }
-
- /* Zero length data */
- if (field_len == 1)
- return NULL;
-
- if (data_len)
- *data_len = field_len - 1;
-
- return &eir[2];
- }
-
- return NULL;
-}
-
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
if (addr_type != ADDR_LE_DEV_RANDOM)
@@ -1867,4 +1855,9 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
#define SCO_AIRMODE_CVSD 0x0000
#define SCO_AIRMODE_TRANSP 0x0003
+#define LOCAL_CODEC_ACL_MASK BIT(0)
+#define LOCAL_CODEC_SCO_MASK BIT(1)
+
+#define TRANSPORT_TYPE_MAX 0x04
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 40296ed976a9..4202c609bb0b 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -130,7 +130,8 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 27336fc70467..423f97b982ff 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -740,6 +740,22 @@ struct cfg80211_tid_config {
};
/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+};
+
+/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
*
@@ -1041,6 +1057,36 @@ struct cfg80211_crypto_settings {
};
/**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+};
+
+/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
* or %NULL if not changed
@@ -1058,6 +1104,7 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@ -1075,6 +1122,7 @@ struct cfg80211_beacon_data {
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@ -1189,6 +1237,7 @@ enum cfg80211_ap_settings_flags {
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1221,6 +1270,7 @@ struct cfg80211_ap_settings {
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@ -4018,6 +4068,10 @@ struct mgmt_frame_regs {
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4348,6 +4402,8 @@ struct cfg80211_ops {
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@ -4981,6 +5037,13 @@ struct wiphy_iftype_akm_suites {
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@ -5125,6 +5188,9 @@ struct wiphy {
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5490,7 +5556,7 @@ struct wireless_dev {
unsigned long unprot_beacon_reported;
};
-static inline u8 *wdev_address(struct wireless_dev *wdev)
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@ -6309,6 +6375,17 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
}
/**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
+/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
* @non_inherit_element: non inheritance element
diff --git a/include/net/codel.h b/include/net/codel.h
index a6e428f80135..a6c9e34e62b8 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -102,6 +102,9 @@ static inline u32 codel_time_to_us(codel_time_t val)
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
+ * @ce_threshold_selector: apply ce_threshold to packets matching this value
+ * in the diffserv/ECN byte of the IP header
+ * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison
*/
struct codel_params {
codel_time_t target;
@@ -109,6 +112,8 @@ struct codel_params {
codel_time_t interval;
u32 mtu;
bool ecn;
+ u8 ce_threshold_selector;
+ u8 ce_threshold_mask;
};
/**
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index d289b91dcd65..137d40d8cbeb 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -54,6 +54,8 @@ static void codel_params_init(struct codel_params *params)
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ce_threshold_mask = 0;
+ params->ce_threshold_selector = 0;
params->ecn = false;
}
@@ -246,9 +248,19 @@ static struct sk_buff *codel_dequeue(void *ctx,
vars->rec_inv_sqrt);
}
end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
+ bool set_ce = true;
+
+ if (params->ce_threshold_mask) {
+ int dsfield = skb_get_dsfield(skb);
+
+ set_ce = (dsfield >= 0 &&
+ (((u8)dsfield & params->ce_threshold_mask) ==
+ params->ce_threshold_selector));
+ }
+ if (set_ce && INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ }
return skb;
}
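
Note: with the hunk above, CE marking at ce_threshold can be restricted to packets whose masked diffserv/ECN byte matches the selector. A minimal configuration sketch, assuming an L4S-style policy of marking only ECT(1) traffic; the 1 ms threshold is an assumed example value:

static void example_enable_ect1_ce(struct codel_params *params)
{
	params->ce_threshold = MS2TIME(1);		/* assumed 1 ms threshold */
	/* compare only the two ECN bits, and require ECT(1) */
	params->ce_threshold_mask = INET_ECN_MASK;	/* 0x03 */
	params->ce_threshold_selector = INET_ECN_ECT_1;	/* 0x01 */
}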
diff --git a/include/net/datalink.h b/include/net/datalink.h
index a9663229b913..d9b7faaa539f 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -12,7 +12,7 @@ struct datalink_proto {
int (*rcvfunc)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
int (*request)(struct datalink_proto *, struct sk_buff *,
- unsigned char *);
+ const unsigned char *);
struct list_head node;
};
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 154cf0dbca37..aab3d007c577 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -21,45 +21,7 @@
#include <linux/xarray.h>
#include <linux/firmware.h>
-#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
- (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
-
-struct devlink_dev_stats {
- u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
- u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
-};
-
-struct devlink_ops;
-
-struct devlink {
- u32 index;
- struct list_head port_list;
- struct list_head rate_list;
- struct list_head sb_list;
- struct list_head dpipe_table_list;
- struct list_head resource_list;
- struct list_head param_list;
- struct list_head region_list;
- struct list_head reporter_list;
- struct mutex reporters_lock; /* protects reporter_list */
- struct devlink_dpipe_headers *dpipe_headers;
- struct list_head trap_list;
- struct list_head trap_group_list;
- struct list_head trap_policer_list;
- const struct devlink_ops *ops;
- struct xarray snapshot_ids;
- struct devlink_dev_stats stats;
- struct device *dev;
- possible_net_t _net;
- struct mutex lock; /* Serializes access to devlink instance specific objects such as
- * port, sb, dpipe, resource, params, region, traps and more.
- */
- u8 reload_failed:1,
- reload_enabled:1;
- refcount_t refcount;
- struct completion comp;
- char priv[0] __aligned(NETDEV_ALIGN);
-};
+struct devlink;
struct devlink_port_phys_attrs {
u32 port_number; /* Same value as "split group".
@@ -506,7 +468,6 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
- bool published;
};
enum devlink_param_generic_id {
@@ -1224,6 +1185,11 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+enum {
+ /* device supports reload operations */
+ DEVLINK_F_RELOAD = 1UL << 0,
+};
+
struct devlink_ops {
/**
* @supported_flash_update_params:
@@ -1520,34 +1486,9 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
};
-static inline void *devlink_priv(struct devlink *devlink)
-{
- BUG_ON(!devlink);
- return &devlink->priv;
-}
-
-static inline struct devlink *priv_to_devlink(void *priv)
-{
- BUG_ON(!priv);
- return container_of(priv, struct devlink, priv);
-}
-
-static inline struct devlink_port *
-netdev_to_devlink_port(struct net_device *dev)
-{
- if (dev->netdev_ops->ndo_get_devlink_port)
- return dev->netdev_ops->ndo_get_devlink_port(dev);
- return NULL;
-}
-
-static inline struct devlink *netdev_to_devlink(struct net_device *dev)
-{
- struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
-
- if (devlink_port)
- return devlink_port->devlink;
- return NULL;
-}
+void *devlink_priv(struct devlink *devlink);
+struct devlink *priv_to_devlink(void *priv);
+struct device *devlink_to_dev(const struct devlink *devlink);
struct ib_device;
@@ -1566,10 +1507,9 @@ static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
{
return devlink_alloc_ns(ops, priv_size, &init_net, dev);
}
-int devlink_register(struct devlink *devlink);
+void devlink_set_features(struct devlink *devlink, u64 features);
+void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
-void devlink_reload_enable(struct devlink *devlink);
-void devlink_reload_disable(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
@@ -1651,34 +1591,11 @@ int devlink_param_register(struct devlink *devlink,
const struct devlink_param *param);
void devlink_param_unregister(struct devlink *devlink,
const struct devlink_param *param);
-void devlink_params_publish(struct devlink *devlink);
-void devlink_params_unpublish(struct devlink *devlink);
-void devlink_param_publish(struct devlink *devlink,
- const struct devlink_param *param);
-void devlink_param_unpublish(struct devlink *devlink,
- const struct devlink_param *param);
-int devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
-void devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count);
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
-int
-devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value *init_val);
-int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
-void devlink_port_param_value_changed(struct devlink_port *devlink_port,
- u32 param_id);
-void devlink_param_value_str_fill(union devlink_param_value *dst_val,
- const char *src);
struct devlink_region *
devlink_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
@@ -1723,10 +1640,7 @@ int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
const char *name);
int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
-int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
-int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
-int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
u16 value_len);
@@ -1812,9 +1726,12 @@ devlink_trap_policers_unregister(struct devlink *devlink,
#if IS_ENABLED(CONFIG_NET_DEVLINK)
-void devlink_compat_running_version(struct net_device *dev,
+struct devlink *__must_check devlink_try_get(struct devlink *devlink);
+void devlink_put(struct devlink *devlink);
+
+void devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len);
-int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
+int devlink_compat_flash_update(struct devlink *devlink, const char *file_name);
int devlink_compat_phys_port_name_get(struct net_device *dev,
char *name, size_t len);
int devlink_compat_switch_id_get(struct net_device *dev,
@@ -1822,13 +1739,22 @@ int devlink_compat_switch_id_get(struct net_device *dev,
#else
+static inline struct devlink *devlink_try_get(struct devlink *devlink)
+{
+ return NULL;
+}
+
+static inline void devlink_put(struct devlink *devlink)
+{
+}
+
static inline void
-devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
+devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len)
{
}
static inline int
-devlink_compat_flash_update(struct net_device *dev, const char *file_name)
+devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
{
return -EOPNOTSUPP;
}
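
Note: with struct devlink now opaque and devlink_register() returning void, the expected driver sequence is allocate, configure, then register last. A minimal probe sketch under the new API; example_devlink_ops and struct example_priv are hypothetical driver names:

static int example_probe(struct device *dev)
{
	struct devlink *devlink;
	struct example_priv *priv;

	devlink = devlink_alloc(&example_devlink_ops, sizeof(*priv), dev);
	if (!devlink)
		return -ENOMEM;

	priv = devlink_priv(devlink);
	/* driver setup using priv goes here */

	/* replaces the old devlink_reload_enable()/disable() pair */
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);	/* no return value; call last */
	return 0;
}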
diff --git a/include/net/dn.h b/include/net/dn.h
index 56ab0726c641..ba9655b0098a 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -166,7 +166,7 @@ struct dn_skb_cb {
int iif;
};
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
+static inline __le16 dn_eth2dn(const unsigned char *ethaddr)
{
return get_unaligned((__le16 *)(ethaddr + 4));
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index d784e76113b8..eff5c44ba377 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -51,6 +51,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
+#define DSA_TAG_PROTO_RTL8_4_VALUE 24
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -77,6 +78,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
+ DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
};
struct dsa_switch;
@@ -285,6 +287,7 @@ struct dsa_port {
/* List of MAC addresses that must be forwarded on this port.
* These are only valid on CPU ports and DSA links.
*/
+ struct mutex addr_lists_lock;
struct list_head fdbs;
struct list_head mdbs;
@@ -472,14 +475,41 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}
+#define dsa_tree_for_each_user_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_port(_dp, _ds) \
+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
+ if ((_dp)->ds == (_ds))
+
+#define dsa_switch_for_each_available_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (!dsa_port_is_unused((_dp)))
+
+#define dsa_switch_for_each_user_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_switch_for_each_cpu_port(_dp, _ds) \
+ dsa_switch_for_each_port((_dp), (_ds)) \
+ if (dsa_port_is_cpu((_dp)))
+
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
u32 mask = 0;
- int p;
- for (p = 0; p < ds->num_ports; p++)
- if (dsa_is_user_port(ds, p))
- mask |= BIT(p);
+ dsa_switch_for_each_user_port(dp, ds)
+ mask |= BIT(dp->index);
return mask;
}
@@ -615,6 +645,8 @@ struct dsa_switch_ops {
/*
* PHYLINK integration
*/
+ void (*phylink_get_interfaces)(struct dsa_switch *ds, int port,
+ unsigned long *supported_interfaces);
void (*phylink_validate)(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state);
@@ -645,6 +677,12 @@ struct dsa_switch_ops {
int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
+ void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
void (*get_stats64)(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
void (*self_test)(struct dsa_switch *ds, int port,
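
Note: the new dsa_switch_for_each_*() helpers above iterate the tree's port list and filter by owning switch and port type, as the rewritten dsa_user_ports() demonstrates. One more usage sketch in the same spirit, counting CPU ports (illustrative only):

static unsigned int example_count_cpu_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	unsigned int count = 0;

	dsa_switch_for_each_cpu_port(dp, ds)
		count++;

	return count;
}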
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index ffd386ea0dbb..aa33e1092e2c 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -287,6 +287,7 @@ enum flow_dissector_key_id {
#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1)
#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2)
+#define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3)
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 1424e02cef90..7aa2b8e1fb29 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,14 +7,17 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u64 packets;
-};
-
-struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur parallel to writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+ u64_stats_t bytes;
+ u64_stats_t packets;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -34,6 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats;
};
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);
@@ -42,41 +46,38 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(const seqcount_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
-int gnet_stats_copy_basic_hw(const seqcount_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
+int gnet_stats_copy_basic_hw(struct gnet_dump *d,
+ struct gnet_stats_basic_sync __percpu *cpu,
+ struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
- const struct gnet_stats_queue __percpu *cpu_q,
- const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+ struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
spinlock_t *lock,
- seqcount_t *running, struct nlattr *opt);
+ bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
struct gnet_stats_rate_est64 *sample);
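
Note: the comment above prescribes the new gnet_stats_basic_sync usage: initialize each instance with gnet_stats_basic_sync_init(), write from the datapath with bstats_update() (as the act_api.h hunk shows), and fold per-CPU counters with gnet_stats_add_basic(). A minimal sketch for the per-CPU case; the allocation of cpu_bstats is assumed to happen elsewhere:

/* setup: initialize every per-CPU instance once */
static void example_bstats_init(struct gnet_stats_basic_sync __percpu *cpu_bstats)
{
	int cpu;

	for_each_possible_cpu(cpu)
		gnet_stats_basic_sync_init(per_cpu_ptr(cpu_bstats, cpu));
}

/* dump path: fold the per-CPU counters into a stack-allocated copy */
static void example_bstats_read(struct gnet_stats_basic_sync __percpu *cpu_bstats,
				struct gnet_stats_basic_sync *sum)
{
	gnet_stats_basic_sync_init(sum);
	gnet_stats_add_basic(sum, cpu_bstats, NULL, true);
}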
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b06c2d02ec84..fa6a87246a7b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -289,7 +289,7 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(*sk->sk_prot->orphan_count);
}
void inet_csk_destroy_sock(struct sock *sk);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index ba77f47ef61e..ea32393464a2 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -188,6 +188,23 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0;
}
+static inline int skb_get_dsfield(struct sk_buff *skb)
+{
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ break;
+ return ipv4_get_dsfield(ip_hdr(skb));
+
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ break;
+ return ipv6_get_dsfield(ipv6_hdr(skb));
+ }
+
+ return -1;
+}
+
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 89163ef8cf4b..9e1111f5915b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -207,11 +207,10 @@ struct inet_sock {
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
+ struct ip_options_rcu __rcu *inet_opt;
__be16 inet_sport;
__u16 inet_id;
- struct ip_options_rcu __rcu *inet_opt;
- int rx_dst_ifindex;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 3c2993bc48c8..3f45ba37a2c6 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -56,7 +56,8 @@ static inline struct ioam6_pernet_data *ioam6_pernet(struct net *net)
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id);
void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
- struct ioam6_trace_hdr *trace);
+ struct ioam6_trace_hdr *trace,
+ bool is_input);
int ioam6_init(void);
void ioam6_exit(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964..b71e88507c4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
+#include <linux/static_key.h>
#include <net/inet_sock.h>
#include <net/route.h>
@@ -291,7 +292,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
@@ -746,6 +751,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
+DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7cb5a1aace40..ff1804a0c469 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -931,6 +931,7 @@ struct netns_ipvs {
int sysctl_conn_reuse_mode;
int sysctl_schedule_icmp;
int sysctl_ignore_tunneled;
+ int sysctl_run_estimation;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1071,6 +1072,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return ipvs->sysctl_cache_bypass;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_run_estimation;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1163,6 +1169,11 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f2d0ecc257bb..c19bf51ded1d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1092,6 +1092,7 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
/*
* socket options (ipv6_sockglue.c)
*/
+DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
diff --git a/include/net/llc.h b/include/net/llc.h
index df282d9b4017..fd1f9a3fd8dd 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -133,7 +133,7 @@ static inline void llc_sap_put(struct llc_sap *sap)
struct llc_sap *llc_sap_find(unsigned char sap_value);
int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
- unsigned char *dmac, unsigned char dsap);
+ const unsigned char *dmac, unsigned char dsap);
void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index 8d5c543cd620..c72570a21a4f 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,7 +62,8 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,
+ u8 dsap);
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
int llc_send_disc(struct sock *sk);
#endif /* LLC_IF_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 618d1f427cb2..dd757f0987b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -632,6 +632,10 @@ struct ieee80211_fils_discovery {
* @s1g: BSS is S1G BSS (affects Association Request format).
* @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
* to driver when rate control is offloaded to firmware.
+ * @power_type: power type of BSS for 6 GHz
+ * @tx_pwr_env: transmit power envelope array of BSS.
+ * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @pwr_reduction: power constraint of BSS.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -702,6 +706,10 @@ struct ieee80211_bss_conf {
u32 unsol_bcast_probe_resp_interval;
bool s1g;
struct cfg80211_bitrate_mask beacon_tx_rate;
+ enum ieee80211_ap_reg_power power_type;
+ struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
+ u8 tx_pwr_env_num;
+ u8 pwr_reduction;
};
/**
@@ -1715,6 +1723,7 @@ enum ieee80211_offload_flags {
* write-protected by sdata_lock and local->mtx so holding either is fine
* for read access.
* @color_change_color: the bss color that will be used after the change.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1746,6 +1755,8 @@ struct ieee80211_vif {
bool color_change_active;
u8 color_change_color;
+ struct ieee80211_vif *mbssid_tx_vif;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
diff --git a/include/net/mctp.h b/include/net/mctp.h
index ffd2c23bd76d..7e35ec79b909 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/mctp.h>
+#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -58,39 +59,53 @@ struct mctp_sock {
mctp_eid_t bind_addr;
__u8 bind_type;
+ /* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
+ bool addr_ext;
+
/* list of mctp_sk_key, for incoming tag lookup. updates protected
* by sk->net->keys_lock
*/
struct hlist_head keys;
+
+ /* mechanism for expiring allocated keys; will release an allocated
+ * tag, and any netdev state for a request/response pairing
+ */
+ struct timer_list key_expiry;
};
/* Key for matching incoming packets to sockets or reassembly contexts.
* Packets are matched on (src,dest,tag).
*
- * Lifetime requirements:
+ * Lifetime / locking requirements:
+ *
+ * - individual key data (ie, the struct itself) is protected by key->lock;
+ * changes must be made with that lock held.
+ *
+ * - the lookup fields: peer_addr, local_addr and tag are set before the
+ * key is added to lookup lists, and never updated.
*
- * - keys are free()ed via RCU
+ * - A ref to the key must be held (through key->refs) if a pointer to the
+ * key is to be accessed after key->lock is released.
*
* - a mctp_sk_key contains a reference to a struct sock; this is valid
* for the life of the key. On sock destruction (through unhash), the key is
- * removed from lists (see below), and will not be observable after a RCU
- * grace period.
- *
- * any RX occurring within that grace period may still queue to the socket,
- * but will hit the SOCK_DEAD case before the socket is freed.
+ * removed from lists (see below), and marked invalid.
*
* - these mctp_sk_keys appear on two lists:
* 1) the struct mctp_sock->keys list
* 2) the struct netns_mctp->keys list
*
- * updates to either list are performed under the netns_mctp->keys
- * lock.
+ * presence on these lists requires a (single) refcount to be held; both
+ * lists are updated as a single operation.
+ *
+ * Updates and lookups in either list are performed under the
+ * netns_mctp->keys lock. Lookup functions will need to lock the key and
+ * take a reference before unlocking the keys_lock. Consequently, the list's
+ * keys_lock *cannot* be acquired with the individual key->lock held.
*
* - a key may have a sk_buff attached as part of an in-progress message
- * reassembly (->reasm_head). The reassembly context is protected by
- * reasm_lock, which may be acquired with the keys lock (above) held, if
- * necessary. Consequently, keys lock *cannot* be acquired with the
- * reasm_lock held.
+ * reassembly (->reasm_head). The reasm data is protected by the individual
+ * key->lock.
*
* - there are two destruction paths for a mctp_sk_key:
*
@@ -101,6 +116,8 @@ struct mctp_sock {
* the (complete) reply, or during reassembly errors. Here, we clean up
* the reassembly context (marking reasm_dead, to prevent another from
* starting), and remove the socket from the netns & socket lists.
+ *
+ * - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
mctp_eid_t peer_addr;
@@ -116,20 +133,40 @@ struct mctp_sk_key {
/* per-socket list */
struct hlist_node sklist;
+ /* lock protects against concurrent updates to the reassembly and
+ * expiry data below.
+ */
+ spinlock_t lock;
+
+ /* Keys are referenced during the output path, which may sleep */
+ refcount_t refs;
+
/* incoming fragment reassembly context */
- spinlock_t reasm_lock;
struct sk_buff *reasm_head;
struct sk_buff **reasm_tailp;
bool reasm_dead;
u8 last_seq;
- struct rcu_head rcu;
+ /* key validity */
+ bool valid;
+
+ /* expiry timeout; valid (above) cleared on expiry */
+ unsigned long expiry;
+
+ /* free to use for device flow state tracking. Initialised to
+ * zero on initial key creation
+ */
+ unsigned long dev_flow_state;
+ struct mctp_dev *dev;
};
struct mctp_skb_cb {
unsigned int magic;
unsigned int net;
+ int ifindex; /* extended/direct addressing if set */
mctp_eid_t src;
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
};
/* skb control-block accessors with a little extra debugging for initial
@@ -153,10 +190,18 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
{
struct mctp_skb_cb *cb = (void *)skb->cb;
+ BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
WARN_ON(cb->magic != 0x4d435450);
return (void *)(skb->cb);
}
+/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
+ * indicating the flow to the device driver.
+ */
+struct mctp_flow {
+ struct mctp_sk_key *key;
+};
+
/* Route definition.
*
* These are held in the pernet->mctp.routes list, with RCU protection for
@@ -165,8 +210,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
*
* Updates to the route table are performed under rtnl; all reads under RCU,
* so routes cannot be referenced over a RCU grace period. Specifically: A
- * caller cannot block between mctp_route_lookup and passing the route to
- * mctp_do_route.
+ * caller cannot block between mctp_route_lookup and mctp_route_release()
*/
struct mctp_route {
mctp_eid_t min, max;
@@ -186,11 +230,11 @@ struct mctp_route {
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr);
-int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb);
-
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+void mctp_key_unref(struct mctp_sk_key *key);
+
/* routing <--> device interface */
unsigned int mctp_default_net(struct net *net);
int mctp_default_net_set(struct net *net, unsigned int index);
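The locking rules documented above impose a fixed order: lookups run under netns_mctp->keys_lock, a reference is taken before that lock is dropped, and keys_lock is never acquired while key->lock is held. A sketch of a conforming lookup, assuming the per-net fields keys/keys_lock and a per-net hlist node named hlist on the key (illustrative only, not a function from this patch set):

/* Illustrative only; a real lookup would also match local_addr/tag and
 * recheck ->valid under key->lock.
 */
static struct mctp_sk_key *example_key_lookup(struct net *net, mctp_eid_t peer)
{
	struct mctp_sk_key *key = NULL, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (tmp->peer_addr == peer && tmp->valid) {
			refcount_inc(&tmp->refs);	/* hold across unlock */
			key = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return key;	/* release with mctp_key_unref() */
}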
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 71a11012fac7..5c0d04b5c12c 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -14,11 +14,17 @@
#include <linux/types.h>
#include <linux/refcount.h>
+struct mctp_sk_key;
+
struct mctp_dev {
struct net_device *dev;
+ refcount_t refs;
+
unsigned int net;
+ const struct mctp_netdev_ops *ops;
+
/* Only modified under RTNL. Reads have addrs_lock held */
u8 *addrs;
size_t num_addrs;
@@ -27,9 +33,24 @@ struct mctp_dev {
struct rcu_head rcu;
};
+struct mctp_netdev_ops {
+ void (*release_flow)(struct mctp_dev *dev,
+ struct mctp_sk_key *key);
+};
+
#define MCTP_INITIAL_DEFAULT_NET 1
struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
+int mctp_register_netdev(struct net_device *dev,
+ const struct mctp_netdev_ops *ops);
+void mctp_unregister_netdev(struct net_device *dev);
+
+void mctp_dev_hold(struct mctp_dev *mdev);
+void mctp_dev_put(struct mctp_dev *mdev);
+
+void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key);
+
#endif /* __NET_MCTPDEVICE_H */
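The register/unregister pair and mctp_netdev_ops give hardware drivers a hook for releasing per-flow state when a key dies. A sketch of a driver wiring this up (all example_* names are placeholders):

static void example_release_flow(struct mctp_dev *mdev,
				 struct mctp_sk_key *key)
{
	/* Free whatever the driver stashed in key->dev_flow_state. */
}

static const struct mctp_netdev_ops example_mctp_ops = {
	.release_flow	= example_release_flow,
};

static int example_probe(struct net_device *ndev)
{
	/* Registers @ndev as an MCTP device and attaches the flow ops. */
	return mctp_register_netdev(ndev, &example_mctp_ops);
}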
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3214848402ec..a925349b4b89 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -12,6 +12,8 @@
#include <linux/tcp.h>
#include <linux/types.h>
+struct mptcp_info;
+struct mptcp_sock;
struct seq_file;
/* MPTCP sk_buff extension data */
@@ -125,6 +127,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts);
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
+
/* move the skb extension owership, with the assumption that 'to' is
* newly allocated
*/
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 38e4094960ce..04341d86585d 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -137,7 +137,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);
-void __ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data,
+void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
#define NDISC_OPS_REDIRECT_DATA_SPACE 2
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 22ced1381ede..38a0c1d24570 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -144,17 +144,18 @@ struct neighbour {
struct timer_list timer;
unsigned long used;
atomic_t probes;
- __u8 flags;
- __u8 nud_state;
- __u8 type;
- __u8 dead;
+ u8 nud_state;
+ u8 type;
+ u8 dead;
u8 protocol;
+ u32 flags;
seqlock_t ha_lock;
unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
+ struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,7 +173,7 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
- u8 flags;
+ u32 flags;
u8 protocol;
u8 key[];
};
@@ -216,11 +217,13 @@ struct neigh_table {
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
+ struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
+ struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,12 +253,21 @@ static inline void *neighbour_priv(const struct neighbour *n)
}
/* flags for neigh_update() */
-#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
-#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
-#define NEIGH_UPDATE_F_EXT_LEARNED 0x20000000
-#define NEIGH_UPDATE_F_ISROUTER 0x40000000
-#define NEIGH_UPDATE_F_ADMIN 0x80000000
+#define NEIGH_UPDATE_F_OVERRIDE BIT(0)
+#define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1)
+#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2)
+#define NEIGH_UPDATE_F_USE BIT(3)
+#define NEIGH_UPDATE_F_MANAGED BIT(4)
+#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
+#define NEIGH_UPDATE_F_ISROUTER BIT(6)
+#define NEIGH_UPDATE_F_ADMIN BIT(7)
+
+/* In-kernel representation for NDA_FLAGS_EXT flags: */
+#define NTF_OLD_MASK 0xff
+#define NTF_EXT_SHIFT 8
+#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+
+#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
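The BIT()-based layout makes the packing of NDA_FLAGS plus NDA_FLAGS_EXT into the widened u32 explicit; conceptually (a sketch mirroring the masks above, not the actual rtnetlink parsing code):

/* Sketch: fold the 8-bit legacy flags and the extended flags together. */
static u32 example_fold_neigh_flags(u8 ndm_flags, u32 ext_flags)
{
	return (ndm_flags & NTF_OLD_MASK) |
	       ((ext_flags & NTF_EXT_MASK) << NTF_EXT_SHIFT);
}

With NTF_EXT_MANAGED set in ext_flags, this yields NTF_MANAGED, matching the definition above.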
@@ -504,10 +516,15 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
{
const struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
+ /* n->nud_state and hh->hh_len could be changed under us.
+ * neigh_hh_output() takes care of the race later.
+ */
+ if (!skip_cache &&
+ (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- else
- return n->output(n, skb);
+
+ return n->output(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index a16171c5fd9e..a0d9e0b47ab8 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -21,13 +21,19 @@ struct module;
#define NFT_JUMP_STACK_SIZE 16
+enum {
+ NFT_PKTINFO_L4PROTO = (1 << 0),
+ NFT_PKTINFO_INNER = (1 << 1),
+};
+
struct nft_pktinfo {
struct sk_buff *skb;
const struct nf_hook_state *state;
- bool tprot_set;
+ u8 flags;
u8 tprot;
u16 fragoff;
unsigned int thoff;
+ unsigned int inneroff;
};
static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)
@@ -75,7 +81,7 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt)
{
- pkt->tprot_set = false;
+ pkt->flags = 0;
pkt->tprot = 0;
pkt->thoff = 0;
pkt->fragoff = 0;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index eb4c094cd54d..c4a6147b0ef8 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -10,7 +10,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
struct iphdr *ip;
ip = ip_hdr(pkt->skb);
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = ip->protocol;
pkt->thoff = ip_hdrlen(pkt->skb);
pkt->fragoff = ntohs(ip->frag_off) & IP_OFFSET;
@@ -36,7 +36,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
else if (len < thoff)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
pkt->thoff = thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
@@ -71,7 +71,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
goto inhdr_error;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
pkt->thoff = thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
@@ -82,4 +82,5 @@ inhdr_error:
__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
return -1;
}
+
#endif
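Every protocol-specific initialiser now reports layer-4 metadata through pkt->flags instead of the old tprot_set boolean, so consumers test the bit; e.g. (sketch):

/* Sketch: the old "if (!pkt->tprot_set)" guard, expressed via flags. */
static bool example_have_l4proto(const struct nft_pktinfo *pkt)
{
	return pkt->flags & NFT_PKTINFO_L4PROTO;
}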
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 7595e02b00ba..ec7eaeaf4f04 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -18,7 +18,7 @@ static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt)
return;
}
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
@@ -50,7 +50,7 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
if (protohdr < 0)
return -1;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
@@ -96,7 +96,7 @@ static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt)
if (protohdr < 0)
goto inhdr_error;
- pkt->tprot_set = true;
+ pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = protohdr;
pkt->thoff = thoff;
pkt->fragoff = frag_off;
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda5..4c3809e141f4 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -6,7 +6,7 @@
struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
- struct gnet_stats_basic_packed bstats;
+ struct gnet_stats_basic_sync bstats;
spinlock_t lock;
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index a4082406a003..3855f069627f 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -216,24 +216,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
- (sizeof(dma_addr_t) > sizeof(unsigned long))
-
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
- return ret;
+ return page->dma_addr;
}
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
page->dma_addr = addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- page->dma_addr_upper = upper_32_bits(addr);
}
static inline void page_pool_set_frag_count(struct page *page, long nr)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 83a6d0792180..193f88ebf629 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -765,7 +765,7 @@ struct tc_cookie {
};
struct tc_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats;
};
@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
};
struct tc_gred_qopt_offload_stats {
- struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs];
};
@@ -977,6 +977,7 @@ enum tc_tbf_command {
TC_TBF_REPLACE,
TC_TBF_DESTROY,
TC_TBF_STATS,
+ TC_TBF_GRAFT,
};
struct tc_tbf_qopt_offload_replace_params {
@@ -992,6 +993,7 @@ struct tc_tbf_qopt_offload {
union {
struct tc_tbf_qopt_offload_replace_params replace_params;
struct tc_qopt_offload_stats stats;
+ u32 child_handle;
};
};
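With TC_TBF_GRAFT and the child_handle member, an offloading driver learns when a new child qdisc is attached below the offloaded TBF. A sketch of the driver side (example_* names and the port type are hypothetical):

static int example_setup_tc_tbf(struct example_port *port,
				struct tc_tbf_qopt_offload *qopt)
{
	switch (qopt->command) {
	case TC_TBF_REPLACE:
		return example_tbf_replace(port, &qopt->replace_params);
	case TC_TBF_GRAFT:
		/* A child was grafted; qopt->child_handle identifies it. */
		return example_tbf_graft(port, qopt->child_handle);
	default:
		return -EOPNOTSUPP;
	}
}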
diff --git a/include/net/rose.h b/include/net/rose.h
index cf517d306a28..0f0a4ce0fee7 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -162,8 +162,8 @@ extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
-int rosecmp(rose_address *, rose_address *);
-int rosecmpm(rose_address *, rose_address *, unsigned short);
+int rosecmp(const rose_address *, const rose_address *);
+int rosecmpm(const rose_address *, const rose_address *, unsigned short);
char *rose2asc(char *buf, const rose_address *);
struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
void rose_kill_by_neigh(struct rose_neigh *);
@@ -205,8 +205,8 @@ extern const struct seq_operations rose_node_seqops;
extern struct seq_operations rose_route_seqops;
void rose_add_loopback_neigh(void);
-int __must_check rose_add_loopback_node(rose_address *);
-void rose_del_loopback_node(rose_address *);
+int __must_check rose_add_loopback_node(const rose_address *);
+void rose_del_loopback_node(const rose_address *);
void rose_rt_device_down(struct net_device *);
void rose_link_device_down(struct net_device *);
struct net_device *rose_dev_first(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c0069ac00e62..22179b2fda72 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -40,6 +40,13 @@ enum qdisc_state_t {
__QDISC_STATE_DRAINING,
};
+enum qdisc_state2_t {
+ /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
+ * Use qdisc_run_begin/end() or qdisc_is_running() instead.
+ */
+ __QDISC_STATE2_RUNNING,
+};
+
#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
@@ -97,7 +104,7 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
int pad;
refcount_t refcnt;
@@ -107,10 +114,10 @@ struct Qdisc {
*/
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- seqcount_t running;
+ struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
+ unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
@@ -143,11 +150,15 @@ static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
return NULL;
}
+/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
+ * root_lock section, or provide their own memory barriers -- ordering
+ * against qdisc_run_begin/end() atomic bit operations.
+ */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+ return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
@@ -167,6 +178,9 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
return !READ_ONCE(qdisc->q.qlen);
}
+/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
+ * the qdisc root lock acquired.
+ */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
@@ -206,15 +220,8 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
* after it releases the lock at the end of qdisc_run_end().
*/
return spin_trylock(&qdisc->seqlock);
- } else if (qdisc_is_running(qdisc)) {
- return false;
}
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- * was attempted.
- */
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
+ return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
@@ -226,7 +233,7 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
&qdisc->state)))
__netif_schedule(qdisc);
} else {
- write_seqcount_end(&qdisc->running);
+ __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
}
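For the !TCQ_F_NOLOCK case, the non-atomic bit operations on state2 are safe only because begin/end run inside the qdisc root lock, as on the core transmit path; schematically (assuming q is a !TCQ_F_NOLOCK qdisc, cf. __dev_xmit_skb()):

static void example_locked_run(struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);

	spin_lock(root_lock);
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);		/* dequeue/transmit loop */
		qdisc_run_end(q);
	}
	spin_unlock(root_lock);
}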
@@ -308,6 +315,8 @@ struct Qdisc_ops {
struct netlink_ext_ack *extack);
void (*attach)(struct Qdisc *sch);
int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
+ void (*change_real_num_tx)(struct Qdisc *sch,
+ unsigned int new_real_tx);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -590,14 +599,6 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
-{
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- ASSERT_RTNL();
- return &root->running;
-}
-
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
@@ -684,6 +685,8 @@ void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+ unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -845,14 +848,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
- bstats->bytes += bytes;
- bstats->packets += packets;
+ u64_stats_update_begin(&bstats->syncp);
+ u64_stats_add(&bstats->bytes, bytes);
+ u64_stats_add(&bstats->packets, packets);
+ u64_stats_update_end(&bstats->syncp);
}
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
@@ -860,26 +865,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- __u64 bytes, __u32 packets)
-{
- u64_stats_update_begin(&bstats->syncp);
- _bstats_update(&bstats->bstats, bytes, packets);
- u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
- const struct sk_buff *skb)
-{
- u64_stats_update_begin(&bstats->syncp);
- bstats_update(&bstats->bstats, skb);
- u64_stats_update_end(&bstats->syncp);
-}
-
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}
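Since both the per-CPU and the qdisc-lock-protected counters are now gnet_stats_basic_sync, readers use one u64_stats retry loop for either; roughly:

/* Sketch: snapshot a gnet_stats_basic_sync pair on the reader side. */
static void example_read_bstats(const struct gnet_stats_basic_sync *b,
				u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&b->syncp);
		*bytes = u64_stats_read(&b->bytes);
		*packets = u64_stats_read(&b->packets);
	} while (u64_stats_fetch_retry(&b->syncp, start));
}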
static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -968,10 +957,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };
- __u32 len = qdisc_qlen_sum(sch);
- __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
- *qlen = qstats.qlen;
+ gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+ *qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}
@@ -1312,15 +1300,15 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
struct mini_Qdisc {
struct tcf_proto *filter_list;
struct tcf_block *block;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct rcu_head rcu;
+ unsigned long rcu_state;
};
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
- bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+ bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
@@ -1341,6 +1329,8 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);
+void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
#endif
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 69bab88ad66b..189fdb9db162 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -626,7 +626,8 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
{
- return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0);
+ return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -
+ sizeof(struct sctphdr);
}
static inline void sctp_transport_pl_reset(struct sctp_transport *t)
@@ -653,12 +654,10 @@ static inline void sctp_transport_pl_update(struct sctp_transport *t)
if (t->pl.state == SCTP_PL_DISABLED)
return;
- if (del_timer(&t->probe_timer))
- sctp_transport_put(t);
-
t->pl.state = SCTP_PL_BASE;
t->pl.pmtu = SCTP_BASE_PLPMTU;
t->pl.probe_size = SCTP_BASE_PLPMTU;
+ sctp_transport_reset_probe_timer(t);
}
static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
diff --git a/include/net/sock.h b/include/net/sock.h
index 463f390d90b3..b32906e1ab55 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -259,10 +259,11 @@ struct bpf_local_storage;
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -270,6 +271,7 @@ struct bpf_local_storage;
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@ -329,7 +331,6 @@ struct bpf_local_storage;
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -394,7 +395,6 @@ struct sock {
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -413,6 +413,7 @@ struct sock {
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@ -431,6 +432,9 @@ struct sock {
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@ -443,7 +447,6 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -1207,6 +1210,8 @@ struct proto {
unsigned int inuse_idx;
#endif
+ int (*forward_alloc_get)(const struct sock *sk);
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
@@ -1214,6 +1219,7 @@ struct proto {
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
@@ -1237,7 +1243,7 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1291,20 +1297,22 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+ if (!sk->sk_prot->forward_alloc_get)
+ return sk->sk_forward_alloc;
+
+ return sk->sk_prot->forward_alloc_get(sk);
+}
+
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
-#ifdef CONFIG_INET
return sk->sk_prot->stream_memory_free ?
- INDIRECT_CALL_1(sk->sk_prot->stream_memory_free,
- tcp_stream_memory_free,
- sk, wake) : true;
-#else
- return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk, wake) : true;
-#endif
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
@@ -1518,20 +1526,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
skb_pfmemalloc(skb);
}
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
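The reserved quota accounted here is set from userspace by the SO_RESERVE_MEM socket option introduced in the same series; from an application it looks roughly like this (sketch; the option value is an assumption taken from the series' uapi, error handling elided):

/* Userspace sketch: reserve ~1 MiB of non-reclaimable socket memory. */
#include <sys/socket.h>

#ifndef SO_RESERVE_MEM
#define SO_RESERVE_MEM 73	/* assumed uapi value from this series */
#endif

static int example_reserve_mem(int fd)
{
	int bytes = 1 << 20;

	return setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
			  &bytes, sizeof(bytes));
}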
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1541,11 +1578,19 @@ static inline void sk_mem_charge(struct sock *sk, int size)
sk->sk_forward_alloc -= size;
}
+/* the following macros control memory reclaiming in sk_mem_uncharge()
+ */
+#define SK_RECLAIM_THRESHOLD (1 << 21)
+#define SK_RECLAIM_CHUNK (1 << 20)
+
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1554,23 +1599,8 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
- __sk_mem_reclaim(sk, 1 << 20);
-}
-
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
-{
- sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
- __kfree_skb(skb);
+ if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
+ __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
}
static inline void sock_release_ownership(struct sock *sk)
@@ -1889,10 +1919,8 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@ -1900,15 +1928,19 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@ -2388,13 +2420,11 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@ -2608,7 +2638,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2620,12 +2649,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@ -2820,6 +2843,10 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs);
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
static inline bool sk_is_readable(struct sock *sk)
{
if (sk->sk_prot->sock_is_readable)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 60d806b6a5ae..d353793dfeb5 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -299,28 +299,16 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
-int switchdev_handle_fdb_add_to_device(struct net_device *dev,
+int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info));
-
-int switchdev_handle_fdb_del_to_device(struct net_device *dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info));
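The merged helper takes one callback that receives the notifier event, so drivers dispatch internally instead of passing separate add/del callbacks; a sketch of such a callback (example_* names are placeholders):

static int example_fdb_event(struct net_device *dev,
			     struct net_device *orig_dev,
			     unsigned long event, const void *ctx,
			     const struct switchdev_notifier_fdb_info *fdb_info)
{
	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		return example_fdb_add(dev, fdb_info);
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		return example_fdb_del(dev, fdb_info);
	default:
		return -EOPNOTSUPP;
	}
}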
int switchdev_handle_port_obj_add(struct net_device *dev,
@@ -426,32 +414,16 @@ call_switchdev_blocking_notifiers(unsigned long val,
}
static inline int
-switchdev_handle_fdb_add_to_device(struct net_device *dev,
- const struct switchdev_notifier_fdb_info *fdb_info,
- bool (*check_cb)(const struct net_device *dev),
- bool (*foreign_dev_check_cb)(const struct net_device *dev,
- const struct net_device *foreign_dev),
- int (*add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_add_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info))
-{
- return 0;
-}
-
-static inline int
-switchdev_handle_fdb_del_to_device(struct net_device *dev,
+switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
bool (*foreign_dev_check_cb)(const struct net_device *dev,
const struct net_device *foreign_dev),
- int (*del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
- int (*lag_del_cb)(struct net_device *dev,
- const struct net_device *orig_dev, const void *ctx,
+ int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info))
{
return 0;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 60c384569e9c..70972f3ac8fa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -48,7 +48,9 @@
extern struct inet_hashinfo tcp_hashinfo;
-extern struct percpu_counter tcp_orphan_count;
+DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
+int tcp_orphan_count_sum(void);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
@@ -288,21 +290,15 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
-void sk_forced_mem_schedule(struct sock *sk, int size);
-
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- struct percpu_counter *ocp = sk->sk_prot->orphan_count;
- int orphans = percpu_counter_read_positive(ocp);
-
- if (orphans << shift > sysctl_tcp_max_orphans) {
- orphans = percpu_counter_sum_positive(ocp);
- if (orphans << shift > sysctl_tcp_max_orphans)
- return true;
- }
- return false;
+ sk_wmem_queued_add(sk, -skb->truesize);
+ sk_mem_uncharge(sk, skb->truesize);
+ __kfree_skb(skb);
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
bool tcp_check_oom(struct sock *sk, int shift);
@@ -322,7 +318,7 @@ void tcp_shutdown(struct sock *sk, int how);
int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
-void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
+void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
@@ -330,8 +326,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
@@ -350,6 +344,8 @@ void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
@@ -581,6 +577,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -874,10 +872,11 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
+#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
/* There is space for up to 24 bytes */
- __u32 in_flight:30,/* Bytes in flight at transmit */
- is_app_limited:1, /* cwnd not fully used? */
- unused:1;
+ __u32 is_app_limited:1, /* cwnd not fully used? */
+ delivered_ce:20,
+ unused:11;
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
@@ -1029,7 +1028,9 @@ struct ack_sample {
struct rate_sample {
u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
+ s32 delivered_ce; /* number of packets delivered w/ CE marks*/
long interval_us; /* time for tp->delivered to incr "delivered" */
u32 snd_interval_us; /* snd interval for delivered packets */
u32 rcv_interval_us; /* rcv interval for delivered packets */
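With delivered_ce in the sample, an ECN-aware congestion control can compute a per-interval CE mark ratio directly; e.g. (sketch, not code from this diff):

/* Sketch: percentage of CE-marked packets in this rate sample. */
static u32 example_ce_percent(const struct rate_sample *rs)
{
	if (rs->delivered <= 0)
		return 0;
	return (u32)(rs->delivered_ce * 100 / rs->delivered);
}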
@@ -1418,6 +1419,17 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
@@ -1870,7 +1882,7 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
{
list_del(&skb->tcp_tsorted_anchor);
tcp_rtx_queue_unlink(skb, sk);
- sk_wmem_free_skb(sk, skb);
+ tcp_wmem_free_skb(sk, skb);
}
static inline void tcp_push_pending_frames(struct sock *sk)
diff --git a/include/net/tls.h b/include/net/tls.h
index 1fffb206f09f..526cb2c3b724 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -66,7 +66,7 @@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16 bytes of IV are made of 4 fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@ -74,6 +74,7 @@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+#define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@ -220,6 +221,8 @@ union tls_crypto_context {
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
diff --git a/include/net/xdp.h b/include/net/xdp.h
index ad5b02dcb6f4..447f9b1578f3 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -15,13 +15,13 @@
* level RX-ring queues. It is information that is specific to how
* the driver have configured a given RX-ring queue.
*
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
* data-path read-access to RX-info for both kernel and bpf-side
* (limited subset).
*
* For now, direct access is only safe while running in NAPI/softirq
- * context. Contents is read-mostly and must not be updated during
+ * context. Contents are read-mostly and must not be updated during
* driver NAPI/softirq poll.
*
* The driver usage API is a register and unregister API.
@@ -30,8 +30,8 @@
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
* naturally need to stop the RX-ring before purging and reallocating
- * memory. In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload). The register API is
+ * memory. In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
*/
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 4e295541e396..443d45951564 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -77,6 +77,12 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return xp_alloc_batch(pool, xdp, max);
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return xp_can_alloc(pool, count);
@@ -89,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
+}
+
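Together, the batch allocator and the size setter cover a zero-copy driver's RX path: bulk-allocate on refill, then fix up the data pointers on completion. Schematically (ring-posting details are hypothetical):

/* Sketch: refill up to @budget RX slots from the pool. */
static u32 example_rx_refill(struct xsk_buff_pool *pool,
			     struct xdp_buff **bufs, u32 budget)
{
	u32 i, n;

	n = xsk_buff_alloc_batch(pool, bufs, budget);	/* 0 <= n <= budget */
	for (i = 0; i < n; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(bufs[i]);

		/* ...post @dma to the hardware RX ring... */
		(void)dma;
	}
	return n;
}

/* On completion, set the data pointers for the received frame length. */
static void example_rx_complete(struct xdp_buff *xdp, u32 len)
{
	xsk_buff_set_size(xdp, len);
}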
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
@@ -212,6 +225,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+ return 0;
+}
+
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
@@ -221,6 +239,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
u64 addr)
{
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 7a9a23e7a604..ddeefc4a1040 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -7,6 +7,7 @@
#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/bpf.h>
#include <net/xdp.h>
struct xsk_buff_pool;
@@ -23,7 +24,6 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- bool unaligned;
u64 orig_addr;
struct list_head free_list_node;
};
@@ -67,6 +67,7 @@ struct xsk_buff_pool {
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
+ u32 chunk_shift;
u32 frame_len;
u8 cached_need_wakeup;
bool uses_need_wakeup;
@@ -81,6 +82,13 @@ struct xsk_buff_pool {
struct xdp_buff_xsk *free_heads[];
};
+/* Masks for xdp_umem_page flags.
+ * The low 12-bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem);
@@ -89,7 +97,6 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
-void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
@@ -99,12 +106,28 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ u64 addr)
+{
+ xskb->orig_addr = addr;
+ xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+ dma_addr_t *dma_pages, u64 addr)
+{
+ xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+ (addr & ~PAGE_MASK);
+ xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
+
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
@@ -180,4 +203,25 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
xp_unaligned_extract_offset(addr);
}
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+ if (xskb->pool->unaligned)
+ xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+ u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+ offset += xskb->pool->headroom;
+ if (!xskb->pool->unaligned)
+ return xskb->orig_addr + offset;
+ return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
+
#endif /* XSK_BUFF_POOL_H_ */