Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bpf.h                  |  12
-rw-r--r--  include/linux/etherdevice.h          |  18
-rw-r--r--  include/linux/ethtool.h              |  67
-rw-r--r--  include/linux/filter.h               |   9
-rw-r--r--  include/linux/ieee80211.h            | 100
-rw-r--r--  include/linux/if_hsr.h               |  17
-rw-r--r--  include/linux/if_vlan.h              |  41
-rw-r--r--  include/linux/igmp.h                 |   2
-rw-r--r--  include/linux/ktime.h                |   5
-rw-r--r--  include/linux/mlx5/device.h          |   4
-rw-r--r--  include/linux/mlx5/driver.h          |   4
-rw-r--r--  include/linux/mlx5/fs.h              |   4
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h        | 133
-rw-r--r--  include/linux/mroute_base.h          |   6
-rw-r--r--  include/linux/net.h                  |   2
-rw-r--r--  include/linux/net_tstamp.h           |  29
-rw-r--r--  include/linux/netdevice.h            | 202
-rw-r--r--  include/linux/netfilter/x_tables.h   |   2
-rw-r--r--  include/linux/netfilter_netdev.h     |   3
-rw-r--r--  include/linux/netpoll.h              |   2
-rw-r--r--  include/linux/packing.h              | 425
-rw-r--r--  include/linux/pci.h                  |  14
-rw-r--r--  include/linux/pcs/pcs-xpcs.h         |   1
-rw-r--r--  include/linux/phy.h                  | 137
-rw-r--r--  include/linux/phylib_stubs.h         |  42
-rw-r--r--  include/linux/phylink.h              |  76
-rw-r--r--  include/linux/pldmfw.h               |   8
-rw-r--r--  include/linux/pse-pd/pse.h           | 134
-rw-r--r--  include/linux/ptp_clock_kernel.h     |   4
-rw-r--r--  include/linux/ptr_ring.h             |  21
-rw-r--r--  include/linux/rfkill.h               |   2
-rw-r--r--  include/linux/rtnetlink.h            |  14
-rw-r--r--  include/linux/skb_array.h            |  17
-rw-r--r--  include/linux/skbuff.h               |  65
-rw-r--r--  include/linux/stmmac.h               |  10
-rw-r--r--  include/linux/wwan.h                 |   2
36 files changed, 1399 insertions, 235 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6e63dd3443b9..9ef57f6f584a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2589,10 +2589,10 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
- struct bpf_prog *xdp_prog);
+ const struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress);
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress);
void __cpu_map_flush(struct list_head *flush_list);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
@@ -2862,15 +2862,15 @@ struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
return 0;
}
static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress)
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
{
return 0;
}
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index ecf203f01034..9a1eacf35d37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -81,7 +81,7 @@ static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
*
* Please note: addr must be aligned to u16.
@@ -104,7 +104,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is all zeroes.
+ * Return: true if the address is all zeroes.
*
* Please note: addr must be aligned to u16.
*/
@@ -123,7 +123,7 @@ static inline bool is_zero_ether_addr(const u8 *addr)
* is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a multicast address.
+ * Return: true if the address is a multicast address.
* By definition the broadcast address is also a multicast address.
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
@@ -157,7 +157,7 @@ static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
 * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a local address.
+ * Return: true if the address is a local address.
*/
static inline bool is_local_ether_addr(const u8 *addr)
{
@@ -168,7 +168,7 @@ static inline bool is_local_ether_addr(const u8 *addr)
* is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is the broadcast address.
+ * Return: true if the address is the broadcast address.
*
* Please note: addr must be aligned to u16.
*/
@@ -183,7 +183,7 @@ static inline bool is_broadcast_ether_addr(const u8 *addr)
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a unicast address.
+ * Return: true if the address is a unicast address.
*/
static inline bool is_unicast_ether_addr(const u8 *addr)
{
@@ -197,7 +197,7 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
* Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
- * Return true if the address is valid.
+ * Return: true if the address is valid.
*
* Please note: addr must be aligned to u16.
*/
@@ -214,7 +214,7 @@ static inline bool is_valid_ether_addr(const u8 *addr)
*
* Check that the value from the Ethertype/length field is a valid Ethertype.
*
- * Return true if the valid is an 802.3 supported Ethertype.
+ * Return: true if the value is an 802.3 supported Ethertype.
*/
static inline bool eth_proto_is_802_3(__be16 proto)
{
@@ -458,7 +458,7 @@ static inline bool ether_addr_is_ip_mcast(const u8 *addr)
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return a u64 value of the address
+ * Return: a u64 value of the address
*/
static inline u64 ether_addr_to_u64(const u8 *addr)
{
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b8b935b52603..870994cc3ef7 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -78,6 +78,9 @@ enum {
* @cqe_size: Size of TX/RX completion queue event
* @tx_push_buf_len: Size of TX push buffer
* @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ * @hds_thresh: Packet size threshold for header data split (HDS)
+ * @hds_thresh_max: Maximum supported setting for @hds_thresh
+ *
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
@@ -87,6 +90,8 @@ struct kernel_ethtool_ringparam {
u32 cqe_size;
u32 tx_push_buf_len;
u32 tx_push_buf_max_len;
+ u32 hds_thresh;
+ u32 hds_thresh_max;
};
/**
@@ -97,6 +102,7 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
* @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
* @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
+ * @ETHTOOL_RING_USE_HDS_THRS: capture for setting header-data-split-thresh
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
@@ -105,6 +111,7 @@ enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
+ ETHTOOL_RING_USE_HDS_THRS = BIT(6),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
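A driver supporting the new threshold would set ETHTOOL_RING_USE_HDS_THRS in
@supported_ring_params and report the values from its .get_ringparam op; a
minimal sketch, with the driver private type and maximum value hypothetical:

    static void my_get_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *ring,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
    {
            struct my_priv *priv = netdev_priv(dev);

            /* current threshold and the device limit for it */
            kernel_ring->hds_thresh = priv->hds_thresh;
            kernel_ring->hds_thresh_max = MY_HDS_THRESH_MAX;
    }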
@@ -257,7 +264,7 @@ struct ethtool_link_ksettings {
* @mode : one of the ETHTOOL_LINK_MODE_*_BIT
* (not atomic, no bound checking)
*
- * Returns true/false.
+ * Returns: true/false.
*/
#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
@@ -412,6 +419,29 @@ struct ethtool_eth_phy_stats {
);
};
+/**
+ * struct ethtool_phy_stats - PHY-level statistics counters
+ * @rx_packets: Total successfully received frames
+ * @rx_bytes: Total successfully received bytes
+ * @rx_errors: Total received frames with errors (e.g., CRC errors)
+ * @tx_packets: Total successfully transmitted frames
+ * @tx_bytes: Total successfully transmitted bytes
+ * @tx_errors: Total transmitted frames with errors
+ *
+ * This structure provides a standardized interface for reporting
+ * PHY-level statistics counters. It is designed to expose statistics
+ * commonly provided by PHYs but not explicitly defined in the IEEE
+ * 802.3 standard.
+ */
+struct ethtool_phy_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+};
+
/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
* via a more targeted API.
*/
@@ -529,6 +559,12 @@ struct ethtool_rmon_stats {
/**
* struct ethtool_ts_stats - HW timestamping statistics
* @pkts: Number of packets successfully timestamped by the hardware.
+ * @onestep_pkts_unconfirmed: Number of PTP packets with one-step TX
+ * timestamping that were sent, but for which the
+ * device offers no confirmation whether they made
+ * it onto the wire and the timestamp was inserted
+ * in the originTimestamp or correctionField, or
+ * not.
* @lost: Number of hardware timestamping requests where the timestamping
* information from the hardware never arrived for submission with
* the skb.
@@ -541,6 +577,7 @@ struct ethtool_rmon_stats {
struct ethtool_ts_stats {
struct_group(tx_stats,
u64 pkts;
+ u64 onestep_pkts_unconfirmed;
u64 lost;
u64 err;
);
@@ -711,6 +748,7 @@ struct ethtool_rxfh_param {
* @cmd: command number = %ETHTOOL_GET_TS_INFO
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
+ * @phc_qualifier: qualifier of the associated PHC
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
*/
@@ -718,6 +756,7 @@ struct kernel_ethtool_ts_info {
u32 cmd;
u32 so_timestamping;
int phc_index;
+ enum hwtstamp_provider_qualifier phc_qualifier;
enum hwtstamp_tx_types tx_types;
enum hwtstamp_rx_filters rx_filters;
};
@@ -749,6 +788,7 @@ struct kernel_ethtool_ts_info {
* @rss_context argument to @create_rxfh_context and friends.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
+ * @supported_hwtstamp_qualifiers: bitfield of supported hwtstamp provider qualifiers.
* @get_drvinfo: Report driver/device information. Modern drivers no
* longer have to implement this callback. Most fields are
* correctly filled in by the core using system information, or
@@ -966,6 +1006,7 @@ struct ethtool_ops {
u32 rxfh_max_num_contexts;
u32 supported_coalesce_params;
u32 supported_ring_params;
+ u32 supported_hwtstamp_qualifiers;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -1199,7 +1240,7 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
* @dev: pointer to net_device structure
* @vclock_index: pointer to pointer of vclock index
*
- * Return number of phc vclocks
+ * Return: number of phc vclocks
*/
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
@@ -1253,7 +1294,7 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
* ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
* @dev: pointer to net_device structure
* @info: buffer to hold the result
- * Returns zero on success, non-zero otherwise.
+ * Returns: zero on success, non-zero otherwise.
*/
int ethtool_get_ts_info_by_layer(struct net_device *dev,
struct kernel_ethtool_ts_info *info);
@@ -1299,24 +1340,4 @@ struct ethtool_forced_speed_map {
void
ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
-
-/* C33 PSE extended state and substate. */
-struct ethtool_c33_pse_ext_state_info {
- enum ethtool_c33_pse_ext_state c33_pse_ext_state;
- union {
- enum ethtool_c33_pse_ext_substate_error_condition error_condition;
- enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable;
- enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted;
- enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim;
- enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected;
- enum ethtool_c33_pse_ext_substate_power_not_available power_not_available;
- enum ethtool_c33_pse_ext_substate_short_detected short_detected;
- u32 __c33_pse_ext_substate;
- };
-};
-
-struct ethtool_c33_pse_pw_limit_range {
- u32 min;
- u32 max;
-};
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0477254bc2d3..a3ea46281595 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1179,17 +1179,18 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *prog);
+ struct xdp_buff *xdp, const struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
struct xdp_buff *xdp,
struct xdp_frame *xdpf,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
void xdp_do_flush(void);
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
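The signature changes above only add const; a caller sketch showing the
pattern (the wrapper itself is hypothetical):

    static int my_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                                   struct xdp_buff *xdp,
                                   const struct bpf_prog *prog)
    {
            /* prog is only read here, and after this change the const
             * qualifier propagates through xdp_do_generic_redirect().
             */
            return xdp_do_generic_redirect(dev, skb, xdp, prog);
    }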
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 05dedc45505c..16741e542e81 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1532,6 +1532,17 @@ struct ieee80211_mgmt {
struct {
u8 action_code;
} __packed ttlm_tear_down;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ml_reconf_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 count;
+ u8 variable[];
+ } __packed ml_reconf_resp;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -1542,11 +1553,13 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
#define BSS_MEMBERSHIP_SELECTOR_GLK 125
-#define BSS_MEMBERSHIP_SELECTOR_EPS 124
+#define BSS_MEMBERSHIP_SELECTOR_EPD 124
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
+
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -3883,6 +3896,16 @@ enum ieee80211_protected_eht_actioncode {
WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5,
+ WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12,
};
/* Security key length */
@@ -4961,6 +4984,7 @@ struct ieee80211_multi_link_elem {
#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP 0x0400
#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
@@ -5018,6 +5042,8 @@ struct ieee80211_multi_link_elem {
#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT 0x2000
+#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT 0x4000
struct ieee80211_mle_basic_common_info {
u8 len;
@@ -5033,6 +5059,9 @@ struct ieee80211_mle_preq_common_info {
} __packed;
#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+#define IEEE80211_MLC_RECONF_PRES_EML_CAPA 0x0020
+#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP 0x0040
+#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP 0x0080
/* no fixed fields in RECONF */
@@ -5055,28 +5084,24 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
{
const struct ieee80211_multi_link_elem *mle = (const void *)data;
u16 control = le16_to_cpu(mle->control);
- u8 common = 0;
switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
case IEEE80211_ML_CONTROL_TYPE_BASIC:
case IEEE80211_ML_CONTROL_TYPE_PREQ:
case IEEE80211_ML_CONTROL_TYPE_TDLS:
case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
/*
* The length is the first octet pointed by mle->variable so no
* need to add anything
*/
break;
- case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
- if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
- common += ETH_ALEN;
- return common;
default:
WARN_ON(1);
return 0;
}
- return sizeof(*mle) + common + mle->variable[0];
+ return sizeof(*mle) + mle->variable[0];
}
/**
@@ -5227,6 +5252,47 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
}
/**
+ * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
+ * and operations.
+ * @data: pointer to the multi-link element
+ * Return: the extended MLD capabilities and operations field value from
+ * the multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
* ieee80211_mle_get_mld_id - returns the MLD ID
* @data: pointer to the multi-link element
* Return: The MLD ID in the given multi-link element, or 0 if not present
@@ -5298,6 +5364,8 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
common += 2;
if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
break;
case IEEE80211_ML_CONTROL_TYPE_PREQ:
common += sizeof(struct ieee80211_mle_preq_common_info);
@@ -5308,14 +5376,19 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
case IEEE80211_ML_CONTROL_TYPE_RECONF:
if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
common += ETH_ALEN;
+ if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
break;
case IEEE80211_ML_CONTROL_TYPE_TDLS:
common += sizeof(struct ieee80211_mle_tdls_common_info);
check_common_len = true;
break;
case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
- if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
- common += ETH_ALEN;
+ common = ETH_ALEN + 1;
break;
default:
/* we don't know this type */
@@ -5458,8 +5531,13 @@ ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta
#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
-#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780
-#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM 0
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK 2
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK 3
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS 4
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
/**
* ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
index 0404f5bf4f30..d7941fd88032 100644
--- a/include/linux/if_hsr.h
+++ b/include/linux/if_hsr.h
@@ -13,6 +13,15 @@ enum hsr_version {
PRP_V1,
};
+enum hsr_port_type {
+ HSR_PT_NONE = 0, /* Must be 0, used by framereg */
+ HSR_PT_SLAVE_A,
+ HSR_PT_SLAVE_B,
+ HSR_PT_INTERLINK,
+ HSR_PT_MASTER,
+ HSR_PT_PORTS, /* This must be the last item in the enum */
+};
+
/* HSR Tag.
* As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
* path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
@@ -32,6 +41,8 @@ struct hsr_tag {
#if IS_ENABLED(CONFIG_HSR)
extern bool is_hsr_master(struct net_device *dev);
extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt);
#else
static inline bool is_hsr_master(struct net_device *dev)
{
@@ -42,6 +53,12 @@ static inline int hsr_get_version(struct net_device *dev,
{
return -EINVAL;
}
+
+static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_HSR */
#endif /*_LINUX_IF_HSR_H_*/
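Since the stub returns an ERR_PTR value, callers must check the result with
IS_ERR(); a minimal sketch:

    struct net_device *slave_a;

    slave_a = hsr_get_port_ndev(hsr_dev, HSR_PT_SLAVE_A);
    if (IS_ERR(slave_a))
            return PTR_ERR(slave_a);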
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d65b5d71b93b..38456b42cdb5 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -176,6 +176,7 @@ struct netpoll;
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
+ * @netpoll: netpoll instance "propagated" down to @real_dev
*/
struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
@@ -310,7 +311,7 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
* eth_type_vlan - check for valid vlan ether type.
* @ethertype: ether type to check
*
- * Returns true if the ether type is a vlan ether type.
+ * Returns: true if the ether type is a vlan ether type.
*/
static inline bool eth_type_vlan(__be16 ethertype)
{
@@ -341,9 +342,9 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci,
@@ -390,9 +391,9 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -414,6 +415,8 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto,
@@ -443,6 +446,8 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -461,6 +466,8 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
@@ -533,7 +540,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not of VLAN type
+ * Returns: error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -551,7 +558,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if @skb->vlan_tci is not set correctly
+ * Returns: error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
@@ -570,7 +577,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not VLAN tagged
+ * Returns: error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -582,13 +589,13 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
/**
- * vlan_get_protocol - get protocol EtherType.
+ * __vlan_get_protocol_offset() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
* @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
@@ -639,7 +646,7 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
@@ -720,7 +727,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
* Expects the skb to contain a VLAN tag in the payload, and to have skb->data
* pointing at the MAC header.
*
- * Returns a new pointer to skb->data, or NULL on failure to pull.
+ * Returns: a new pointer to skb->data, or NULL on failure to pull.
*/
static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
{
@@ -737,7 +744,7 @@ static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged, regardless of whether it is hardware
+ * Returns: true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
@@ -753,7 +760,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * Returns: true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
@@ -784,7 +791,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
* @skb: skbuff to query
* @features: features to be checked
*
- * Returns features without unsafe ones if the skb has multiple tags.
+ * Returns: features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
@@ -808,9 +815,11 @@ static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
* @h1: Pointer to vlan header
* @h2: Pointer to vlan header
*
- * Compare two vlan headers, returns 0 if equal.
+ * Compare two vlan headers.
*
* Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
+ *
+ * Return: 0 if equal, arbitrary non-zero value if not equal.
*/
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
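The clarified Return: lines make the error convention explicit: on failure the
original skb has already been freed and NULL is returned, so the caller must
not touch it again. A minimal sketch from an xmit-style path:

    skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vlan_tci);
    if (!skb)
            return NETDEV_TX_OK;    /* skb was freed on failure */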
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 5171231f70a8..073b30a9b850 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -87,6 +87,8 @@ struct ip_mc_list {
char loaded;
unsigned char gsquery; /* check source marks? */
unsigned char crcount;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
struct rcu_head rcu;
};
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3a4e723eae0f..383ed9985802 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -222,6 +222,11 @@ static inline ktime_t ns_to_ktime(u64 ns)
return ns;
}
+static inline ktime_t us_to_ktime(u64 us)
+{
+ return us * NSEC_PER_USEC;
+}
+
static inline ktime_t ms_to_ktime(u64 ms)
{
return ms * NSEC_PER_MSEC;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index cc647992f3d1..0c48b20f818a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1245,6 +1245,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
@@ -1470,6 +1471,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_MACSEC(mdev, cap)\
MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
+#define MLX5_CAP_SHAMPO(mdev, cap) \
+ MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
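The new capability page is queried like the existing ones; e.g., using a field
from the shampo_cap struct added later in this diff:

    if (MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
            /* device can merge payload into header-split buffers */;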
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ea48eb879a0f..af86097641b0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -160,9 +160,12 @@ enum {
MLX5_REG_MIRC = 0x9162,
MLX5_REG_MTPTM = 0x9180,
MLX5_REG_MTCTR = 0x9181,
+ MLX5_REG_MRTCQ = 0x9182,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_NIC_CAP = 0xC00D,
MLX5_REG_DTOR = 0xC00E,
+ MLX5_REG_VHCA_ICM_CTRL = 0xC010,
};
enum mlx5_qpts_trust_state {
@@ -691,7 +694,6 @@ struct mlx5_timer {
struct timecounter tc;
u32 nominal_c_mult;
unsigned long overflow_period;
- struct delayed_work overflow_work;
};
struct mlx5_clock {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 438db888bde0..2a69d9d71276 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -163,7 +163,7 @@ struct mlx5_flow_destination {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- u32 counter_id;
+ struct mlx5_fc *counter;
struct {
u16 num;
u16 vhca_id;
@@ -299,6 +299,8 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
+void mlx5_fc_local_destroy(struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 48d47181c7cd..4f3716e124c9 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1095,7 +1095,9 @@ struct mlx5_ifc_qos_cap_bits {
u8 log_esw_max_sched_depth[0x4];
u8 reserved_at_10[0x10];
- u8 reserved_at_20[0xb];
+ u8 reserved_at_20[0x9];
+ u8 esw_cross_esw_sched[0x1];
+ u8 reserved_at_2a[0x1];
u8 log_max_qos_nic_queue_group[0x5];
u8 reserved_at_30[0x10];
@@ -1103,7 +1105,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
+ u8 reserved_at_80[0xb];
+ u8 log_esw_max_rate_limit[0x5];
u8 packet_pacing_rate_table_size[0x10];
u8 esw_element_type[0x10];
@@ -1590,12 +1593,14 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+ MLX5_STEERING_FORMAT_CONNECTX_8 = 3,
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x6];
u8 page_request_disable[0x1];
- u8 reserved_at_7[0x9];
+ u8 abs_native_port_num[0x1];
+ u8 reserved_at_8[0x8];
u8 shared_object_to_user_object_allowed[0x1];
u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
@@ -1825,7 +1830,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 regexp_params[0x1];
u8 uar_sz[0x6];
u8 port_selection_cap[0x1];
- u8 reserved_at_251[0x1];
+ u8 nic_cap_reg[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
@@ -2324,7 +2329,9 @@ struct mlx5_ifc_wq_bits {
u8 headers_mkey[0x20];
u8 shampo_enable[0x1];
- u8 reserved_at_1e1[0x4];
+ u8 reserved_at_1e1[0x1];
+ u8 shampo_mode[0x2];
+ u8 reserved_at_1e4[0x1];
u8 log_reservation_size[0x3];
u8 reserved_at_1e8[0x5];
u8 log_max_num_of_packets_per_reservation[0x3];
@@ -3322,6 +3329,14 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_nic_cap_reg_bits {
+ u8 reserved_at_0[0x1a];
+ u8 vhca_icm_ctrl[0x1];
+ u8 reserved_at_1b[0x5];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_default_timeout_bits {
u8 to_multiplier[0x3];
u8 reserved_at_3[0x9];
@@ -3358,6 +3373,18 @@ struct mlx5_ifc_dtor_reg_bits {
u8 reserved_at_1c0[0x20];
};
+struct mlx5_ifc_vhca_icm_ctrl_reg_bits {
+ u8 vhca_id_valid[0x1];
+ u8 reserved_at_1[0xf];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_20[0xa0];
+
+ u8 cur_alloc_icm[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -3696,6 +3723,22 @@ struct mlx5_ifc_crypto_cap_bits {
u8 reserved_at_80[0x780];
};
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 shampo_header_split[0x1];
+ u8 shampo_header_split_data_merge[0x1];
+ u8 reserved_at_22[0x1];
+ u8 shampo_log_max_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -4105,6 +4148,7 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT = 0x5,
};
enum {
@@ -4113,34 +4157,41 @@ enum {
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ ELEMENT_TYPE_CAP_MASK_RATE_LIMIT = 1 << 5,
};
enum {
TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB = 0x3,
};
enum {
TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+ TSAR_TYPE_CAP_MASK_TC_ARB = 1 << 3,
};
struct mlx5_ifc_tsar_element_bits {
- u8 reserved_at_0[0x8];
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0x4];
u8 tsar_type[0x8];
u8 reserved_at_10[0x10];
};
struct mlx5_ifc_vport_element_bits {
- u8 reserved_at_0[0x10];
+ u8 reserved_at_0[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
u8 vport_number[0x10];
};
struct mlx5_ifc_vport_tc_element_bits {
u8 traffic_class[0x4];
- u8 reserved_at_4[0xc];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
u8 vport_number[0x10];
};
@@ -4165,7 +4216,9 @@ struct mlx5_ifc_scheduling_context_bits {
u8 max_average_bw[0x20];
- u8 reserved_at_e0[0x120];
+ u8 max_bw_obj_id[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_rqtc_bits {
@@ -6326,6 +6379,20 @@ struct mlx5_ifc_modify_other_hca_cap_in_bits {
struct mlx5_ifc_other_hca_cap_bits other_capability;
};
+struct mlx5_ifc_sw_owner_icm_root_params_bits {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+};
+
+struct mlx5_ifc_rtc_params_bits {
+ u8 rtc_id_0[0x20];
+
+ u8 rtc_id_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_flow_table_context_bits {
u8 reformat_en[0x1];
u8 decap_en[0x1];
@@ -6344,20 +6411,10 @@ struct mlx5_ifc_flow_table_context_bits {
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
- union {
- struct {
- u8 sw_owner_icm_root_1[0x40];
-
- u8 sw_owner_icm_root_0[0x40];
- } sws;
- struct {
- u8 rtc_id_0[0x20];
- u8 rtc_id_1[0x20];
-
- u8 reserved_at_100[0x40];
-
- } hws;
+ union {
+ struct mlx5_ifc_sw_owner_icm_root_params_bits sws;
+ struct mlx5_ifc_rtc_params_bits hws;
};
};
@@ -7006,6 +7063,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
enum {
MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START = 0x2,
MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
};
@@ -10133,7 +10191,21 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_200g_2x[0x10];
u8 fec_override_admin_100g_1x[0x10];
- u8 reserved_at_260[0x20];
+ u8 reserved_at_260[0x60];
+
+ u8 fec_override_cap_1600g_8x[0x10];
+ u8 fec_override_cap_800g_4x[0x10];
+
+ u8 fec_override_cap_400g_2x[0x10];
+ u8 fec_override_cap_200g_1x[0x10];
+
+ u8 fec_override_admin_1600g_8x[0x10];
+ u8 fec_override_admin_800g_4x[0x10];
+
+ u8 fec_override_admin_400g_2x[0x10];
+ u8 fec_override_admin_200g_1x[0x10];
+
+ u8 reserved_at_340[0x80];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -10507,7 +10579,9 @@ struct mlx5_ifc_mtutc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x48];
+ u8 reserved_at_0[0x1d];
+ u8 fec_200G_per_lane_in_pplm[0x1];
+ u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
u8 reserved_at_49[0x1f];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -10647,7 +10721,8 @@ struct mlx5_ifc_mcam_access_reg_bits3 {
u8 regs_63_to_32[0x20];
- u8 regs_31_to_2[0x1e];
+ u8 regs_31_to_3[0x1d];
+ u8 mrtcq[0x1];
u8 mtctr[0x1];
u8 mtptm[0x1];
};
@@ -13138,4 +13213,12 @@ struct mlx5_ifc_msees_reg_bits {
u8 reserved_at_80[0x180];
};
+struct mlx5_ifc_mrtcq_reg_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 rt_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 9dd4bf157255..58a2401e4b55 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -146,9 +146,9 @@ struct mr_mfc {
unsigned long last_assert;
int minvif;
int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
+ atomic_long_t bytes;
+ atomic_long_t pkt;
+ atomic_long_t wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
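Switching these counters to atomic_long_t lets the forwarding path update them
without holding the table lock; a hedged sketch of both sides, with c a
struct mr_mfc embedded in the cache entry:

    /* update (datapath): */
    atomic_long_inc(&c->mfc_un.res.pkt);
    atomic_long_add(skb->len, &c->mfc_un.res.bytes);

    /* read (dump path): */
    unsigned long pkts = atomic_long_read(&c->mfc_un.res.pkt);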
diff --git a/include/linux/net.h b/include/linux/net.h
index b75bc534c1b3..0ff950eecc6b 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -343,8 +343,6 @@ static inline bool sendpages_ok(struct page *page, size_t len, size_t offset)
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
-int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index 662074b08c94..ff0758e88ea1 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -20,6 +20,33 @@ enum hwtstamp_source {
};
/**
+ * struct hwtstamp_provider_desc - hwtstamp provider description
+ *
+ * @index: index of the hwtstamp provider.
+ * @qualifier: hwtstamp provider qualifier.
+ */
+struct hwtstamp_provider_desc {
+ int index;
+ enum hwtstamp_provider_qualifier qualifier;
+};
+
+/**
+ * struct hwtstamp_provider - hwtstamp provider object
+ *
+ * @rcu_head: RCU callback used to free the struct.
+ * @source: source of the hwtstamp provider.
+ * @phydev: pointer to the phydev source in case the PTP comes from phylib
+ * @desc: hwtstamp provider description.
+ */
+
+struct hwtstamp_provider {
+ struct rcu_head rcu_head;
+ enum hwtstamp_source source;
+ struct phy_device *phydev;
+ struct hwtstamp_provider_desc desc;
+};
+
+/**
* struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config
*
* @flags: see struct hwtstamp_config
@@ -31,6 +58,7 @@ enum hwtstamp_source {
* copied the ioctl request back to user space
* @source: indication whether timestamps should come from the netdev or from
* an attached phylib PHY
+ * @qualifier: qualifier of the hwtstamp provider
*
* Prefer using this structure for in-kernel processing of hardware
* timestamping configuration, over the inextensible struct hwtstamp_config
@@ -43,6 +71,7 @@ struct kernel_hwtstamp_config {
struct ifreq *ifr;
bool copied_to_user;
enum hwtstamp_source source;
+ enum hwtstamp_provider_qualifier qualifier;
};
static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
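The provider object is RCU-managed (see @rcu_head); a hedged reader sketch
against the dev->hwprov pointer added to struct net_device below:

    struct hwtstamp_provider *hwprov;

    rcu_read_lock();
    hwprov = rcu_dereference(dev->hwprov);
    if (hwprov && hwprov->source == HWTSTAMP_SOURCE_PHYLIB)
            phydev = hwprov->phydev;        /* timestamps come from phylib */
    rcu_read_unlock();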
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ecc686409161..8da4c61f97b9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -63,6 +63,7 @@ struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
+struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
@@ -82,6 +83,7 @@ struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
+struct hwtstamp_provider;
typedef u32 xdp_features_t;
@@ -381,8 +383,9 @@ struct napi_struct {
struct sk_buff *skb;
struct list_head rx_list; /* Pending GRO_NORMAL skbs */
int rx_count; /* length of rx_list */
- unsigned int napi_id;
+ unsigned int napi_id; /* protected by netdev_lock */
struct hrtimer timer;
+ /* all fields past this point are write-protected by netdev_lock */
struct task_struct *thread;
unsigned long gro_flush_timeout;
unsigned long irq_suspend_timeout;
@@ -509,7 +512,7 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n)
* is scheduled for example in the context of delayed timer
* that can be skipped if a NAPI is already scheduled.
*
- * Return True if NAPI is scheduled, False otherwise.
+ * Return: True if NAPI is scheduled, False otherwise.
*/
static inline bool napi_is_scheduled(struct napi_struct *n)
{
@@ -524,7 +527,7 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
- * Return true if we schedule a NAPI or false if not.
+ * Return: true if we schedule a NAPI or false if not.
* Refer to napi_schedule_prep() for additional reason on why
* a NAPI might not be scheduled.
*/
@@ -558,7 +561,7 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
* Mark NAPI processing as complete. Should only be called if poll budget
* has not been completely consumed.
* Prefer over napi_complete().
- * Return false if device should avoid rearming interrupts.
+ * Return: false if device should avoid rearming interrupts.
*/
bool napi_complete_done(struct napi_struct *n, int work_done);
@@ -569,16 +572,11 @@ static inline bool napi_complete(struct napi_struct *n)
int dev_set_threaded(struct net_device *dev, bool threaded);
-/**
- * napi_disable - prevent NAPI from scheduling
- * @n: NAPI context
- *
- * Stop NAPI from being scheduled on this context.
- * Waits till any outstanding processing completes.
- */
void napi_disable(struct napi_struct *n);
+void napi_disable_locked(struct napi_struct *n);
void napi_enable(struct napi_struct *n);
+void napi_enable_locked(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -2045,6 +2043,7 @@ enum netdev_reg_state {
*
* @neighbours: List heads pointing to this device's neighbours'
* dev_list, one per address-family.
+ * @hwprov: Tracks which PTP performs hardware packet time stamping.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
@@ -2259,7 +2258,7 @@ struct net_device {
void *atalk_ptr;
#endif
#if IS_ENABLED(CONFIG_AX25)
- void *ax25_ptr;
+ struct ax25_dev __rcu *ax25_ptr;
#endif
#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
@@ -2412,6 +2411,14 @@ struct net_device {
const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;
+ /** @cfg: net_device queue-related configuration */
+ struct netdev_config *cfg;
+ /**
+ * @cfg_pending: same as @cfg but when device is being actively
+ * reconfigured includes any changes to the configuration
+ * requested by the user, but which may or may not be rejected.
+ */
+ struct netdev_config *cfg_pending;
struct ethtool_netdev_state *ethtool;
/* protected by rtnl_lock */
@@ -2442,8 +2449,27 @@ struct net_device {
u32 napi_defer_hard_irqs;
/**
- * @lock: protects @net_shaper_hierarchy, feel free to use for other
- * netdev-scope protection. Ordering: take after rtnl_lock.
+ * @up: copy of @state's IFF_UP, but safe to read with just @lock.
+ * May report false negatives while the device is being opened
+ * or closed (@lock does not protect .ndo_open, or .ndo_close).
+ */
+ bool up;
+
+ /**
+ * @lock: netdev-scope lock, protects a small selection of fields.
+ * Should always be taken using netdev_lock() / netdev_unlock() helpers.
+ * Drivers are free to use it for other protection.
+ *
+ * Protects:
+ * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
+ * @net_shaper_hierarchy, @reg_state, @threaded
+ *
+ * Partially protects (writers must hold both @lock and rtnl_lock):
+ * @up
+ *
+ * Also protects some fields in struct napi_struct.
+ *
+ * Ordering: take after rtnl_lock.
*/
struct mutex lock;
@@ -2457,6 +2483,8 @@ struct net_device {
struct hlist_head neighbours[NEIGH_NR_TABLES];
+ struct hwtstamp_provider __rcu *hwprov;
+
u8 priv[] ____cacheline_aligned
__counted_by(priv_len);
} ____cacheline_aligned;
@@ -2667,18 +2695,58 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
-static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+static inline void netdev_lock(struct net_device *dev)
+{
+ mutex_lock(&dev->lock);
+}
+
+static inline void netdev_unlock(struct net_device *dev)
+{
+ mutex_unlock(&dev->lock);
+}
+
+static inline void netdev_assert_locked(struct net_device *dev)
+{
+ lockdep_assert_held(&dev->lock);
+}
+
+static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_assert_locked(dev);
+}
+
+static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq)
{
napi->irq = irq;
}
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ netdev_lock(napi->dev);
+ netif_napi_set_irq_locked(napi, irq);
+ netdev_unlock(napi->dev);
+}
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
#define NAPI_POLL_WEIGHT 64
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
+
+static inline void
+netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netdev_lock(dev);
+ netif_napi_add_weight_locked(dev, napi, poll, weight);
+ netdev_unlock(dev);
+}
/**
* netif_napi_add() - initialize a NAPI context
@@ -2697,6 +2765,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi,
}
static inline void
+netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
netif_napi_add_tx_weight(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
@@ -2706,6 +2781,15 @@ netif_napi_add_tx_weight(struct net_device *dev,
netif_napi_add_weight(dev, napi, poll, weight);
}
+static inline void
+netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ napi->index = index;
+ napi->config = &dev->napi_config[index];
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
/**
* netif_napi_add_config - initialize a NAPI context with persistent config
* @dev: network device
@@ -2717,9 +2801,9 @@ static inline void
netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int index)
{
- napi->index = index;
- napi->config = &dev->napi_config[index];
- netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+ netdev_lock(dev);
+ netif_napi_add_config_locked(dev, napi, poll, index);
+ netdev_unlock(dev);
}
/**
@@ -2739,6 +2823,8 @@ static inline void netif_napi_add_tx(struct net_device *dev,
netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
+void __netif_napi_del_locked(struct napi_struct *napi);
+
/**
* __netif_napi_del - remove a NAPI context
* @napi: NAPI context
@@ -2747,7 +2833,18 @@ static inline void netif_napi_add_tx(struct net_device *dev,
* containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one.
*/
-void __netif_napi_del(struct napi_struct *napi);
+static inline void __netif_napi_del(struct napi_struct *napi)
+{
+ netdev_lock(napi->dev);
+ __netif_napi_del_locked(napi);
+ netdev_unlock(napi->dev);
+}
+
+static inline void netif_napi_del_locked(struct napi_struct *napi)
+{
+ __netif_napi_del_locked(napi);
+ synchronize_net();
+}
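Drivers that already hold the instance lock around their reconfiguration path
use the _locked variants directly; a minimal sketch:

    netdev_lock(dev);
    netif_napi_add_locked(dev, &priv->napi, my_poll);
    netif_napi_set_irq_locked(&priv->napi, irq);
    /* ... later, still under the same lock ... */
    netif_napi_del_locked(&priv->napi);
    netdev_unlock(dev);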
/**
* netif_napi_del - remove a NAPI context
@@ -2854,6 +2951,46 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
u64_stats_update_end(&lstats->syncp);
}
+static inline void dev_dstats_rx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_rx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
+
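Typical use from a virtual driver's datapath (my_forward() is hypothetical);
note the length is captured before the skb may be consumed:

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            unsigned int len = skb->len;

            if (my_forward(dev, skb) == 0)
                    dev_dstats_tx_add(dev, len);
            else
                    dev_dstats_tx_dropped(dev);
            return NETDEV_TX_OK;
    }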
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -3194,7 +3331,6 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
-void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
@@ -3208,7 +3344,6 @@ struct net_device *netdev_get_by_index(struct net *net, int ifindex,
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-struct net_device *dev_get_by_napi_id(unsigned int napi_id);
void netdev_copy_name(struct net_device *dev, char *name);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -3322,6 +3457,7 @@ struct softnet_data {
};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+DECLARE_PER_CPU(struct page_pool *, system_page_pool);
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
@@ -3810,7 +3946,7 @@ static inline bool netif_attr_test_mask(unsigned long j,
* @online_mask: bitmask for CPUs/Rx queues that are online
* @nr_bits: number of bits in the bitmask
*
- * Returns true if a CPU/Rx queue is online.
+ * Returns: true if a CPU/Rx queue is online.
*/
static inline bool netif_attr_test_online(unsigned long j,
const unsigned long *online_mask,
@@ -3830,7 +3966,8 @@ static inline bool netif_attr_test_online(unsigned long j,
* @srcp: the cpumask/Rx queue mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set.
+ * Returns: next (after n) CPU/Rx queue index in the mask;
+ * >= nr_bits if no further CPUs/Rx queues set.
*/
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
@@ -3852,7 +3989,8 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
* @src2p: the second CPUs/Rx queues mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ * Returns: next (after n) CPU/Rx queue index set in both masks;
+ * >= nr_bits if no further CPUs/Rx queues set in both.
*/
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
@@ -3958,9 +4096,9 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog);
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
+ const struct bpf_prog *xdp_prog);
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
@@ -4037,6 +4175,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
+u8 dev_xdp_sb_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
u32 dev_get_min_mp_channel_count(const struct net_device *dev);
@@ -4248,7 +4387,7 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
unsigned long dev_trans_start(struct net_device *dev);
-void __netdev_watchdog_up(struct net_device *dev);
+void netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
@@ -4642,6 +4781,9 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev);
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
+int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5897f3dbaf7c..f39f688d7285 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -357,7 +357,7 @@ extern struct static_key xt_tee_enabled;
* Begin packet processing : all readers must wait the end
* 1) Must be called with preemption disabled
* 2) softirqs must be disabled too (or we should use this_cpu_add())
- * Returns :
+ * Returns:
* 1 if no recursion on this cpu
* 0 if recursion detected
*/
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
index 8676316547cc..3175073a66ba 100644
--- a/include/linux/netfilter_netdev.h
+++ b/include/linux/netfilter_netdev.h
@@ -66,7 +66,6 @@ static inline bool nf_hook_egress_active(void)
* @rc: result code which shall be returned by __dev_queue_xmit() on failure
* @dev: netdev whose egress hooks shall be applied to @skb
*
- * Returns @skb on success or %NULL if the packet was consumed or filtered.
* Caller must hold rcu_read_lock.
*
* On ingress, packets are classified first by tc, then by netfilter.
@@ -81,6 +80,8 @@ static inline bool nf_hook_egress_active(void)
* called recursively by tunnel drivers such as vxlan, the flag is reverted to
* false after sch_handle_egress(). This ensures that netfilter is applied
* both on the overlay and underlying network.
+ *
+ * Returns: @skb on success or %NULL if the packet was consumed or filtered.
*/
static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
struct net_device *dev)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b34301650c47..f91e50a76efd 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -57,7 +57,7 @@ static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
#endif
-void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
+int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 5d36dcd06f60..0589d70bbe04 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -8,6 +8,83 @@
#include <linux/types.h>
#include <linux/bitops.h>
+#define GEN_PACKED_FIELD_STRUCT(__type) \
+ struct packed_field_ ## __type { \
+ __type startbit; \
+ __type endbit; \
+ __type offset; \
+ __type size; \
+ }
+
+/* struct packed_field_u8. Use with bit offsets < 256, buffers < 32B and
+ * unpacked structures < 256B.
+ */
+GEN_PACKED_FIELD_STRUCT(u8);
+
+/* struct packed_field_u16. Use with bit offsets < 65536, buffers < 8KB and
+ * unpacked structures < 64KB.
+ */
+GEN_PACKED_FIELD_STRUCT(u16);
+
+#define PACKED_FIELD(start, end, struct_name, struct_field) \
+{ \
+ (start), \
+ (end), \
+ offsetof(struct_name, struct_field), \
+ sizeof_field(struct_name, struct_field), \
+}
+
+#define CHECK_PACKED_FIELD_OVERLAP(fields, index1, index2) ({ \
+ typeof(&(fields)[0]) __f = (fields); \
+ typeof(__f[0]) _f1 = __f[index1]; typeof(__f[0]) _f2 = __f[index2]; \
+ const bool _ascending = __f[0].startbit < __f[1].startbit; \
+ BUILD_BUG_ON_MSG(_ascending && _f1.startbit >= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks ascending order"); \
+ BUILD_BUG_ON_MSG(!_ascending && _f1.startbit <= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks descending order"); \
+ BUILD_BUG_ON_MSG(max(_f1.endbit, _f2.endbit) <= \
+ min(_f1.startbit, _f2.startbit), \
+ __stringify(fields) " field " __stringify(index2) \
+ " overlaps with previous field"); \
+})
+
+#define CHECK_PACKED_FIELD(fields, index) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(_f[0]) __f = _f[index]; \
+ BUILD_BUG_ON_MSG(__f.startbit < __f.endbit, \
+ __stringify(fields) " field " __stringify(index) \
+ " start bit must not be smaller than end bit"); \
+ BUILD_BUG_ON_MSG(__f.size != 1 && __f.size != 2 && \
+ __f.size != 4 && __f.size != 8, \
+ __stringify(fields) " field " __stringify(index) \
+ " has unsupported unpacked storage size"); \
+ BUILD_BUG_ON_MSG(__f.startbit - __f.endbit >= BITS_PER_BYTE * __f.size, \
+ __stringify(fields) " field " __stringify(index) \
+ " exceeds unpacked storage size"); \
+ __builtin_choose_expr(index != 0, \
+ CHECK_PACKED_FIELD_OVERLAP(fields, index - 1, index), \
+ 1); \
+})
+
+/* Note that the packed fields may be either in ascending or descending order.
+ * Thus, we must check that both the first and last field fit within the
+ * packed buffer size.
+ */
+#define CHECK_PACKED_FIELDS_SIZE(fields, pbuflen) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(pbuflen) _len = (pbuflen); \
+ const size_t num_fields = ARRAY_SIZE(fields); \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_len), \
+ __stringify(fields) " pbuflen " __stringify(pbuflen) \
+ " must be a compile time constant"); \
+ BUILD_BUG_ON_MSG(_f[0].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " first field exceeds packed buffer size"); \
+ BUILD_BUG_ON_MSG(_f[num_fields - 1].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " last field exceeds packed buffer size"); \
+})
+
#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
#define QUIRK_LITTLE_ENDIAN BIT(1)
#define QUIRK_LSW32_IS_FIRST BIT(2)
@@ -26,4 +103,352 @@ int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
size_t pbuflen, u8 quirks);
+void pack_fields_u8(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void pack_fields_u16(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u8(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u16(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+/* Do not hand-edit the following packed field check macros!
+ *
+ * They are generated using scripts/gen_packed_field_checks.c, which may be
+ * built via "make scripts_gen_packed_field_checks". If larger macro sizes are
+ * needed in the future, please use this program to re-generate the macros and
+ * insert them here.
+ */
+
+#define CHECK_PACKED_FIELDS_1(fields) \
+ CHECK_PACKED_FIELD(fields, 0)
+
+#define CHECK_PACKED_FIELDS_2(fields) do { \
+ CHECK_PACKED_FIELDS_1(fields); \
+ CHECK_PACKED_FIELD(fields, 1); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_3(fields) do { \
+ CHECK_PACKED_FIELDS_2(fields); \
+ CHECK_PACKED_FIELD(fields, 2); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_4(fields) do { \
+ CHECK_PACKED_FIELDS_3(fields); \
+ CHECK_PACKED_FIELD(fields, 3); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_5(fields) do { \
+ CHECK_PACKED_FIELDS_4(fields); \
+ CHECK_PACKED_FIELD(fields, 4); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_6(fields) do { \
+ CHECK_PACKED_FIELDS_5(fields); \
+ CHECK_PACKED_FIELD(fields, 5); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_7(fields) do { \
+ CHECK_PACKED_FIELDS_6(fields); \
+ CHECK_PACKED_FIELD(fields, 6); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_8(fields) do { \
+ CHECK_PACKED_FIELDS_7(fields); \
+ CHECK_PACKED_FIELD(fields, 7); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_9(fields) do { \
+ CHECK_PACKED_FIELDS_8(fields); \
+ CHECK_PACKED_FIELD(fields, 8); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_10(fields) do { \
+ CHECK_PACKED_FIELDS_9(fields); \
+ CHECK_PACKED_FIELD(fields, 9); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_11(fields) do { \
+ CHECK_PACKED_FIELDS_10(fields); \
+ CHECK_PACKED_FIELD(fields, 10); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_12(fields) do { \
+ CHECK_PACKED_FIELDS_11(fields); \
+ CHECK_PACKED_FIELD(fields, 11); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_13(fields) do { \
+ CHECK_PACKED_FIELDS_12(fields); \
+ CHECK_PACKED_FIELD(fields, 12); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_14(fields) do { \
+ CHECK_PACKED_FIELDS_13(fields); \
+ CHECK_PACKED_FIELD(fields, 13); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_15(fields) do { \
+ CHECK_PACKED_FIELDS_14(fields); \
+ CHECK_PACKED_FIELD(fields, 14); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_16(fields) do { \
+ CHECK_PACKED_FIELDS_15(fields); \
+ CHECK_PACKED_FIELD(fields, 15); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_17(fields) do { \
+ CHECK_PACKED_FIELDS_16(fields); \
+ CHECK_PACKED_FIELD(fields, 16); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_18(fields) do { \
+ CHECK_PACKED_FIELDS_17(fields); \
+ CHECK_PACKED_FIELD(fields, 17); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_19(fields) do { \
+ CHECK_PACKED_FIELDS_18(fields); \
+ CHECK_PACKED_FIELD(fields, 18); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_20(fields) do { \
+ CHECK_PACKED_FIELDS_19(fields); \
+ CHECK_PACKED_FIELD(fields, 19); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_21(fields) do { \
+ CHECK_PACKED_FIELDS_20(fields); \
+ CHECK_PACKED_FIELD(fields, 20); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_22(fields) do { \
+ CHECK_PACKED_FIELDS_21(fields); \
+ CHECK_PACKED_FIELD(fields, 21); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_23(fields) do { \
+ CHECK_PACKED_FIELDS_22(fields); \
+ CHECK_PACKED_FIELD(fields, 22); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_24(fields) do { \
+ CHECK_PACKED_FIELDS_23(fields); \
+ CHECK_PACKED_FIELD(fields, 23); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_25(fields) do { \
+ CHECK_PACKED_FIELDS_24(fields); \
+ CHECK_PACKED_FIELD(fields, 24); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_26(fields) do { \
+ CHECK_PACKED_FIELDS_25(fields); \
+ CHECK_PACKED_FIELD(fields, 25); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_27(fields) do { \
+ CHECK_PACKED_FIELDS_26(fields); \
+ CHECK_PACKED_FIELD(fields, 26); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_28(fields) do { \
+ CHECK_PACKED_FIELDS_27(fields); \
+ CHECK_PACKED_FIELD(fields, 27); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_29(fields) do { \
+ CHECK_PACKED_FIELDS_28(fields); \
+ CHECK_PACKED_FIELD(fields, 28); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_30(fields) do { \
+ CHECK_PACKED_FIELDS_29(fields); \
+ CHECK_PACKED_FIELD(fields, 29); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_31(fields) do { \
+ CHECK_PACKED_FIELDS_30(fields); \
+ CHECK_PACKED_FIELD(fields, 30); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_32(fields) do { \
+ CHECK_PACKED_FIELDS_31(fields); \
+ CHECK_PACKED_FIELD(fields, 31); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_33(fields) do { \
+ CHECK_PACKED_FIELDS_32(fields); \
+ CHECK_PACKED_FIELD(fields, 32); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_34(fields) do { \
+ CHECK_PACKED_FIELDS_33(fields); \
+ CHECK_PACKED_FIELD(fields, 33); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_35(fields) do { \
+ CHECK_PACKED_FIELDS_34(fields); \
+ CHECK_PACKED_FIELD(fields, 34); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_36(fields) do { \
+ CHECK_PACKED_FIELDS_35(fields); \
+ CHECK_PACKED_FIELD(fields, 35); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_37(fields) do { \
+ CHECK_PACKED_FIELDS_36(fields); \
+ CHECK_PACKED_FIELD(fields, 36); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_38(fields) do { \
+ CHECK_PACKED_FIELDS_37(fields); \
+ CHECK_PACKED_FIELD(fields, 37); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_39(fields) do { \
+ CHECK_PACKED_FIELDS_38(fields); \
+ CHECK_PACKED_FIELD(fields, 38); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_40(fields) do { \
+ CHECK_PACKED_FIELDS_39(fields); \
+ CHECK_PACKED_FIELD(fields, 39); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_41(fields) do { \
+ CHECK_PACKED_FIELDS_40(fields); \
+ CHECK_PACKED_FIELD(fields, 40); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_42(fields) do { \
+ CHECK_PACKED_FIELDS_41(fields); \
+ CHECK_PACKED_FIELD(fields, 41); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_43(fields) do { \
+ CHECK_PACKED_FIELDS_42(fields); \
+ CHECK_PACKED_FIELD(fields, 42); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_44(fields) do { \
+ CHECK_PACKED_FIELDS_43(fields); \
+ CHECK_PACKED_FIELD(fields, 43); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_45(fields) do { \
+ CHECK_PACKED_FIELDS_44(fields); \
+ CHECK_PACKED_FIELD(fields, 44); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_46(fields) do { \
+ CHECK_PACKED_FIELDS_45(fields); \
+ CHECK_PACKED_FIELD(fields, 45); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_47(fields) do { \
+ CHECK_PACKED_FIELDS_46(fields); \
+ CHECK_PACKED_FIELD(fields, 46); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_48(fields) do { \
+ CHECK_PACKED_FIELDS_47(fields); \
+ CHECK_PACKED_FIELD(fields, 47); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_49(fields) do { \
+ CHECK_PACKED_FIELDS_48(fields); \
+ CHECK_PACKED_FIELD(fields, 48); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_50(fields) do { \
+ CHECK_PACKED_FIELDS_49(fields); \
+ CHECK_PACKED_FIELD(fields, 49); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS(fields) \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 1, ({ CHECK_PACKED_FIELDS_1(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 2, ({ CHECK_PACKED_FIELDS_2(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 3, ({ CHECK_PACKED_FIELDS_3(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 4, ({ CHECK_PACKED_FIELDS_4(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 5, ({ CHECK_PACKED_FIELDS_5(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 6, ({ CHECK_PACKED_FIELDS_6(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 7, ({ CHECK_PACKED_FIELDS_7(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 8, ({ CHECK_PACKED_FIELDS_8(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 9, ({ CHECK_PACKED_FIELDS_9(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 10, ({ CHECK_PACKED_FIELDS_10(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 11, ({ CHECK_PACKED_FIELDS_11(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 12, ({ CHECK_PACKED_FIELDS_12(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 13, ({ CHECK_PACKED_FIELDS_13(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 14, ({ CHECK_PACKED_FIELDS_14(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 15, ({ CHECK_PACKED_FIELDS_15(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 16, ({ CHECK_PACKED_FIELDS_16(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 17, ({ CHECK_PACKED_FIELDS_17(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 18, ({ CHECK_PACKED_FIELDS_18(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 19, ({ CHECK_PACKED_FIELDS_19(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 20, ({ CHECK_PACKED_FIELDS_20(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 21, ({ CHECK_PACKED_FIELDS_21(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 22, ({ CHECK_PACKED_FIELDS_22(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 23, ({ CHECK_PACKED_FIELDS_23(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 24, ({ CHECK_PACKED_FIELDS_24(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 25, ({ CHECK_PACKED_FIELDS_25(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 26, ({ CHECK_PACKED_FIELDS_26(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 27, ({ CHECK_PACKED_FIELDS_27(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 28, ({ CHECK_PACKED_FIELDS_28(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 29, ({ CHECK_PACKED_FIELDS_29(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 30, ({ CHECK_PACKED_FIELDS_30(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 31, ({ CHECK_PACKED_FIELDS_31(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 32, ({ CHECK_PACKED_FIELDS_32(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 33, ({ CHECK_PACKED_FIELDS_33(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 34, ({ CHECK_PACKED_FIELDS_34(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 35, ({ CHECK_PACKED_FIELDS_35(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 36, ({ CHECK_PACKED_FIELDS_36(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 37, ({ CHECK_PACKED_FIELDS_37(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 38, ({ CHECK_PACKED_FIELDS_38(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 39, ({ CHECK_PACKED_FIELDS_39(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 40, ({ CHECK_PACKED_FIELDS_40(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 41, ({ CHECK_PACKED_FIELDS_41(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 42, ({ CHECK_PACKED_FIELDS_42(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 43, ({ CHECK_PACKED_FIELDS_43(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 44, ({ CHECK_PACKED_FIELDS_44(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 45, ({ CHECK_PACKED_FIELDS_45(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 46, ({ CHECK_PACKED_FIELDS_46(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 47, ({ CHECK_PACKED_FIELDS_47(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 48, ({ CHECK_PACKED_FIELDS_48(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 49, ({ CHECK_PACKED_FIELDS_49(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 50, ({ CHECK_PACKED_FIELDS_50(fields); }), \
+ ({ BUILD_BUG_ON_MSG(1, "CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than 50."); }) \
+))))))))))))))))))))))))))))))))))))))))))))))))))
+
+/* End of generated content */
+
+#define pack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : pack_fields_u8, \
+ const struct packed_field_u16 * : pack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
+#define unpack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : unpack_fields_u8, \
+ const struct packed_field_u16 * : unpack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
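Editorial sketch of the new field-packing API (all struct, field and bit
positions below are invented for illustration). A driver describes where each
member of its unpacked structure lives inside the packed buffer, and the
CHECK_PACKED_FIELDS*() machinery validates ordering, widths and buffer bounds
at compile time:

	struct foo_cmd {
		u16 vlan;
		u8 prio;
		u8 valid;
	};

	/* Fields in descending bit order within an 8-byte buffer */
	static const struct packed_field_u8 foo_cmd_fields[] = {
		PACKED_FIELD(63, 63, struct foo_cmd, valid),
		PACKED_FIELD(62, 60, struct foo_cmd, prio),
		PACKED_FIELD(59, 48, struct foo_cmd, vlan),
	};

	static void foo_cmd_pack(void *pbuf, const struct foo_cmd *cmd)
	{
		/* pbuflen (8) must be a compile-time constant; no quirks */
		pack_fields(pbuf, 8, cmd, foo_cmd_fields, 0);
	}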
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index db9b47ce3eef..414ee5fff66b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1047,6 +1047,20 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
/**
+ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
+ * @vend: the vendor name
+ * @dev: the 16 bit PCI Device ID
+ * @subvend: the 16 bit PCI Subvendor ID
+ * @subdev: the 16 bit PCI Subdevice ID
+ *
+ * Generate the pci_device_id struct layout for the specific PCI
+ * device/subdevice. Private data may follow the output.
+ */
+#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
+ .subvendor = (subvend), .subdevice = (subdev), 0, 0
+
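As an illustration (the device and subsystem IDs below are invented), a match
entry restricted to a single subsystem of a vendor device would be:

	static const struct pci_device_id foo_pci_ids[] = {
		{ PCI_VDEVICE_SUB(INTEL, 0x1234,
				  PCI_VENDOR_ID_DELL, 0x5678) },
		{ }
	};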
+/**
* PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
* @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
* @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index b5b5d17998b8..733f4ddd2ef1 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -50,7 +50,6 @@ struct dw_xpcs;
struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs);
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
int enable);
struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 563c46205685..19f076a71f94 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -32,19 +32,6 @@
#include <linux/atomic.h>
#include <net/eee.h>
-#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
- SUPPORTED_TP | \
- SUPPORTED_MII)
-
-#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \
- SUPPORTED_10baseT_Full)
-
-#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \
- SUPPORTED_100baseT_Full)
-
-#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
- SUPPORTED_1000baseT_Full)
-
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
@@ -62,16 +49,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
-#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
-#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
-#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
-extern const int phy_fibre_port_array[1];
-extern const int phy_all_ports_features_array[7];
extern const int phy_10_100_features_array[4];
extern const int phy_basic_t1_features_array[3];
extern const int phy_basic_t1s_p2mp_features_array[2];
@@ -298,6 +280,29 @@ static inline const char *phy_modes(phy_interface_t interface)
}
}
+/**
+ * rgmii_clock - map link speed to the clock rate
+ * @speed: link speed value
+ *
+ * Description: maps a supported RGMII link speed to the
+ * corresponding interface clock rate.
+ *
+ * Returns: clock rate or negative errno
+ */
+static inline long rgmii_clock(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return 2500000;
+ case SPEED_100:
+ return 25000000;
+ case SPEED_1000:
+ return 125000000;
+ default:
+ return -EINVAL;
+ }
+}
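As a sketch of typical use (the clock handle name is invented), a MAC driver
retuning its transmit clock on link changes might do:

	long rate = rgmii_clock(speed);

	if (rate > 0)
		clk_set_rate(priv->tx_clk, rate);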
+
#define PHY_INIT_TIMEOUT 100000
#define PHY_FORCE_TIMEOUT 10
@@ -818,6 +823,24 @@ struct phy_tdr_config {
#define PHY_PAIR_ALL -1
/**
+ * enum link_inband_signalling - in-band signalling modes that are supported
+ *
+ * @LINK_INBAND_DISABLE: in-band signalling can be disabled
+ * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass
+ * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass
+ *
+ * These values are returned as a bitmask by the @inband_caps method to
+ * describe which in-band signalling modes can be used for the given PHY
+ * interface mode. A return value of zero means the capabilities are unknown.
+ */
+enum link_inband_signalling {
+ LINK_INBAND_DISABLE = BIT(0),
+ LINK_INBAND_ENABLE = BIT(1),
+ LINK_INBAND_BYPASS = BIT(2),
+};
+
+/**
* struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
* Avoidance) Reconciliation Sublayer.
*
@@ -957,6 +980,19 @@ struct phy_driver {
int (*get_features)(struct phy_device *phydev);
/**
+ * @inband_caps: query whether in-band is supported for the given PHY
+ * interface mode. Returns a bitmask of bits defined by enum
+ * link_inband_signalling.
+ */
+ unsigned int (*inband_caps)(struct phy_device *phydev,
+ phy_interface_t interface);
+
+ /**
+ * @config_inband: configure in-band mode for the PHY
+ */
+ int (*config_inband)(struct phy_device *phydev, unsigned int modes);
+
+ /**
* @get_rate_matching: Get the supported type of rate matching for a
* particular phy interface. This is used by phy consumers to determine
* whether to advertise lower-speed modes for that interface. It is
@@ -1090,6 +1126,53 @@ struct phy_driver {
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
/* Get statistics from the PHY using ethtool */
+ /**
+ * @get_phy_stats: Retrieve PHY statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @eth_stats: structure where Ethernet PHY stats will be stored.
+ * @stats: structure where additional PHY-specific stats will be stored.
+ *
+ * Retrieves the supported PHY statistics and populates the provided
+ * structures. The input structures are pre-initialized with
+ * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members
+ * corresponding to supported statistics. Unmodified members will remain
+ * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_phy_stats)(struct phy_device *dev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats);
+
+ /**
+ * @get_link_stats: Retrieve link statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @link_stats: structure where link-specific stats will be stored.
+ *
+ * Retrieves link-related statistics for the given PHY device. The input
+ * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the
+ * driver must only modify members corresponding to supported
+ * statistics. Unmodified members will remain set to
+ * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_link_stats)(struct phy_device *dev,
+ struct ethtool_link_ext_stats *link_stats);
+
+ /**
+ * @update_stats: Trigger periodic statistics updates.
+ * @dev: The PHY device for which statistics updates are triggered.
+ *
+ * Periodically gathers statistics from the PHY device to update locally
+ * maintained 64-bit counters. This is necessary for PHYs that implement
+ * reduced-width counters (e.g., 16-bit or 32-bit) which can overflow
+ * more frequently compared to 64-bit counters. By invoking this
+ * callback, drivers can fetch the current counter values, handle
+ * overflow detection, and accumulate the results into local 64-bit
+ * counters for accurate reporting through the `get_phy_stats` and
+ * `get_link_stats` interfaces.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int (*update_stats)(struct phy_device *dev);
+
/** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
/** @get_strings: Names of the statistic counters */
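A minimal sketch of the accumulation pattern @update_stats enables (the
register, private struct and field names are invented):

	static int foo_update_stats(struct phy_device *phydev)
	{
		struct foo_priv *priv = phydev->priv;
		int val;

		/* Hypothetical free-running 16-bit RX error counter */
		val = phy_read(phydev, FOO_RXERR_REG);
		if (val < 0)
			return val;

		/* Fold the 16-bit delta into a local 64-bit counter */
		priv->rx_errors += (u16)(val - priv->last_rx_errors);
		priv->last_rx_errors = val;

		return 0;
	}

The driver's @get_phy_stats implementation would then report priv->rx_errors.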
@@ -1580,6 +1663,9 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
if (phydev->drv->flags & PHY_POLL_CABLE_TEST)
return true;
+ if (phydev->drv->update_stats)
+ return true;
+
return phydev->irq == PHY_POLL;
}
@@ -1818,6 +1904,9 @@ int phy_config_aneg(struct phy_device *phydev);
int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface);
+int phy_config_inband(struct phy_device *phydev, unsigned int modes);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
bool phy_check_valid(int speed, int duplex, unsigned long *features);
@@ -1957,7 +2046,7 @@ int genphy_c45_plca_set_cfg(struct phy_device *phydev,
int genphy_c45_plca_get_status(struct phy_device *phydev,
struct phy_plca_status *plca_st);
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled);
+ unsigned long *lp);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data);
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
@@ -2014,6 +2103,7 @@ void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
void phy_support_eee(struct phy_device *phydev);
+void phy_disable_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -2038,6 +2128,8 @@ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
int phy_unregister_fixup_for_id(const char *bus_id);
int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev);
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
@@ -2065,6 +2157,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
+
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
+
int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
struct phy_plca_cfg *plca_cfg);
int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
index 1279f48c8a70..9d2d6090c86d 100644
--- a/include/linux/phylib_stubs.h
+++ b/include/linux/phylib_stubs.h
@@ -5,6 +5,9 @@
#include <linux/rtnetlink.h>
+struct ethtool_eth_phy_stats;
+struct ethtool_link_ext_stats;
+struct ethtool_phy_stats;
struct kernel_hwtstamp_config;
struct netlink_ext_ack;
struct phy_device;
@@ -19,6 +22,11 @@ struct phylib_stubs {
int (*hwtstamp_set)(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
+ void (*get_phy_stats)(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+ void (*get_link_ext_stats)(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
};
static inline int phy_hwtstamp_get(struct phy_device *phydev,
@@ -50,6 +58,29 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
return phylib_stubs->hwtstamp_set(phydev, config, extack);
}
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats);
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_link_ext_stats(phydev, link_stats);
+}
+
#else
static inline int phy_hwtstamp_get(struct phy_device *phydev,
@@ -65,4 +96,15 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
return -EOPNOTSUPP;
}
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+}
+
#endif
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 5c01048860c4..898b00451bbf 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -5,6 +5,8 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <net/eee.h>
+
struct device_node;
struct ethtool_cmd;
struct fwnode_handle;
@@ -143,11 +145,17 @@ enum phylink_op_type {
* possible and avoid stopping it during suspend events.
* @default_an_inband: if true, defaults to MLO_AN_INBAND rather than
* MLO_AN_PHY. A fixed-link specification will override.
+ * @eee_rx_clk_stop_enable: if true, PHY can stop the receive clock during LPI
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
* are supported by the MAC/PCS.
+ * @lpi_interfaces: bitmap describing which PHY interface modes can support
+ * LPI signalling.
* @mac_capabilities: MAC pause/speed/duplex capabilities.
+ * @lpi_capabilities: MAC speeds which can support LPI signalling.
+ * @lpi_timer_default: Default EEE LPI timer setting.
+ * @eee_enabled_default: If set, EEE will be enabled by phylink at creation time.
*/
struct phylink_config {
struct device *dev;
@@ -156,10 +164,15 @@ struct phylink_config {
bool mac_managed_pm;
bool mac_requires_rxc;
bool default_an_inband;
+ bool eee_rx_clk_stop_enable;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ DECLARE_PHY_INTERFACE_MASK(lpi_interfaces);
unsigned long mac_capabilities;
+ unsigned long lpi_capabilities;
+ u32 lpi_timer_default;
+ bool eee_enabled_default;
};
void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
@@ -173,6 +186,8 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
* @mac_finish: finish a major reconfiguration of the interface.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
+ * @mac_disable_tx_lpi: disable LPI.
+ * @mac_enable_tx_lpi: enable and configure LPI.
*
* The individual methods are described more fully below.
*/
@@ -193,6 +208,9 @@ struct phylink_mac_ops {
struct phy_device *phy, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+ void (*mac_disable_tx_lpi)(struct phylink_config *config);
+ int (*mac_enable_tx_lpi)(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
};
#if 0 /* For kernel-doc purposes only. */
@@ -387,12 +405,41 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
void mac_link_up(struct phylink_config *config, struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause);
+
+/**
+ * mac_disable_tx_lpi() - disable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ *
+ * Disable generation of LPI at the MAC, effectively preventing the MAC
+ * from indicating that it is idle.
+ */
+void mac_disable_tx_lpi(struct phylink_config *config);
+
+/**
+ * mac_enable_tx_lpi() - configure and enable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ * @timer: LPI timeout in microseconds.
+ * @tx_clk_stop: allow xMII transmit clock to be stopped during LPI
+ *
+ * Configure the LPI timeout accordingly. This will only be called when
+ * the link is already up, to cater for situations where the hardware
+ * needs to be programmed according to the link speed.
+ *
+ * Enable LPI generation at the MAC, and configure whether the xMII transmit
+ * clock may be stopped.
+ *
+ * Returns: 0 on success. Please consult with rmk before returning an error.
+ */
+int mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
#endif
struct phylink_pcs_ops;
/**
* struct phylink_pcs - PHYLINK PCS instance
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by this PCS.
* @ops: a pointer to the &struct phylink_pcs_ops structure
* @phylink: pointer to &struct phylink_config
* @neg_mode: provide PCS neg mode via "mode" argument
@@ -409,6 +456,7 @@ struct phylink_pcs_ops;
* the PCS driver.
*/
struct phylink_pcs {
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
const struct phylink_pcs_ops *ops;
struct phylink *phylink;
bool neg_mode;
@@ -419,6 +467,7 @@ struct phylink_pcs {
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
* @pcs_validate: validate the link configuration.
+ * @pcs_inband_caps: query inband support for interface mode.
* @pcs_enable: enable the PCS.
* @pcs_disable: disable the PCS.
* @pcs_pre_config: pre-mac_config method (for errata)
@@ -434,13 +483,15 @@ struct phylink_pcs {
struct phylink_pcs_ops {
int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
+ unsigned int (*pcs_inband_caps)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
int (*pcs_enable)(struct phylink_pcs *pcs);
void (*pcs_disable)(struct phylink_pcs *pcs);
void (*pcs_pre_config)(struct phylink_pcs *pcs,
phy_interface_t interface);
int (*pcs_post_config)(struct phylink_pcs *pcs,
phy_interface_t interface);
- void (*pcs_get_state)(struct phylink_pcs *pcs,
+ void (*pcs_get_state)(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
@@ -471,6 +522,20 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
/**
+ * pcs_inband_caps - query PCS in-band capabilities for interface mode.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @interface: interface mode to be queried
+ *
+ * Returns: zero if it is unknown what in-band signalling is supported by the
+ * PCS (e.g. because the PCS driver doesn't implement the method). Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * for this interface mode.
+ */
+unsigned int pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+
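A minimal sketch of an implementation (the PCS name and the
interface-to-capability mapping are illustrative only):

	static unsigned int foo_pcs_inband_caps(struct phylink_pcs *pcs,
						phy_interface_t interface)
	{
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
			/* usable with or without in-band signalling */
			return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
		case PHY_INTERFACE_MODE_1000BASEX:
			/* requires in-band signalling */
			return LINK_INBAND_ENABLE;
		default:
			return 0; /* unknown for other modes */
		}
	}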
+/**
* pcs_enable() - enable the PCS.
* @pcs: a pointer to a &struct phylink_pcs.
*/
@@ -485,6 +550,7 @@ void pcs_disable(struct phylink_pcs *pcs);
/**
* pcs_get_state() - Read the current inband link state from the hardware
* @pcs: a pointer to a &struct phylink_pcs.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Read the current inband link state from the MAC PCS, reporting the
@@ -493,8 +559,11 @@ void pcs_disable(struct phylink_pcs *pcs);
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
+ *
+ * Note that the @neg_mode parameter is always the PHYLINK_PCS_NEG_xxx
+ * state, not MLO_AN_xxx.
*/
-void pcs_get_state(struct phylink_pcs *pcs,
+void pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
/**
@@ -669,8 +738,9 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface)
}
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa);
+ unsigned int neg_mode, u16 bmsr, u16 lpa);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state);
int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
const unsigned long *advertising);
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
index 0fc831338226..f5047983004f 100644
--- a/include/linux/pldmfw.h
+++ b/include/linux/pldmfw.h
@@ -125,9 +125,17 @@ struct pldmfw_ops;
* a pointer to their own data, used to implement the device specific
* operations.
*/
+
+enum pldmfw_update_mode {
+ PLDMFW_UPDATE_MODE_FULL,
+ PLDMFW_UPDATE_MODE_SINGLE_COMPONENT,
+};
+
struct pldmfw {
const struct pldmfw_ops *ops;
struct device *dev;
+ u16 component_identifier;
+ enum pldmfw_update_mode mode;
};
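For instance (the ops structure and identifier value are invented), a caller
restricting an update to a single component would fill in the context as:

	struct pldmfw ctx = {
		.ops = &foo_pldmfw_ops,
		.dev = &pdev->dev,
		.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT,
		.component_identifier = 0x0010,
	};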
bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index 591a53e082e6..c773eeb92d04 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -5,15 +5,37 @@
#ifndef _LINUX_PSE_CONTROLLER_H
#define _LINUX_PSE_CONTROLLER_H
-#include <linux/ethtool.h>
#include <linux/list.h>
#include <uapi/linux/ethtool.h>
/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
#define MAX_PI_CURRENT 1920000
+/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */
+#define MAX_PI_PW 99900
struct phy_device;
struct pse_controller_dev;
+struct netlink_ext_ack;
+
+/* C33 PSE extended state and substate. */
+struct ethtool_c33_pse_ext_state_info {
+ enum ethtool_c33_pse_ext_state c33_pse_ext_state;
+ union {
+ enum ethtool_c33_pse_ext_substate_error_condition error_condition;
+ enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable;
+ enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted;
+ enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim;
+ enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected;
+ enum ethtool_c33_pse_ext_substate_power_not_available power_not_available;
+ enum ethtool_c33_pse_ext_substate_short_detected short_detected;
+ u32 __c33_pse_ext_substate;
+ };
+};
+
+struct ethtool_c33_pse_pw_limit_range {
+ u32 min;
+ u32 max;
+};
/**
* struct pse_control_config - PSE control/channel configuration.
@@ -29,7 +51,52 @@ struct pse_control_config {
};
/**
- * struct pse_control_status - PSE control/channel status.
+ * struct pse_admin_state - PSE operational state
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @c33_admin_state: operational state of the PSE
+ * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
+ */
+struct pse_admin_state {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_c33_pse_admin_state c33_admin_state;
+};
+
+/**
+ * struct pse_pw_status - PSE power detection status
+ *
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @c33_pw_status: power detection status of the PSE.
+ * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ */
+struct pse_pw_status {
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+ enum ethtool_c33_pse_pw_d_status c33_pw_status;
+};
+
+/**
+ * struct pse_ext_state_info - PSE extended state information
+ *
+ * @c33_ext_state_info: extended state information of the PSE
+ */
+struct pse_ext_state_info {
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+};
+
+/**
+ * struct pse_pw_limit_ranges - PSE power limit configuration range
+ *
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation.
+ */
+struct pse_pw_limit_ranges {
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+};
+
+/**
+ * struct ethtool_pse_control_status - PSE control/channel status.
*
* @podl_admin_state: operational state of the PoDL PSE
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
@@ -47,11 +114,11 @@ struct pse_control_config {
* @c33_avail_pw_limit: available power limit of the PSE in mW
* IEEE 802.3-2022 145.2.5.4 pse_avail_pwr
* @c33_pw_limit_ranges: supported power limit configuration range. The driver
- * is in charge of the memory allocation.
+ * is in charge of the memory allocation
* @c33_pw_limit_nb_ranges: number of supported power limit configuration
* ranges
*/
-struct pse_control_status {
+struct ethtool_pse_control_status {
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
enum ethtool_c33_pse_admin_state c33_admin_state;
@@ -67,40 +134,51 @@ struct pse_control_status {
/**
* struct pse_controller_ops - PSE controller driver callbacks
*
- * @ethtool_get_status: get PSE control status for ethtool interface
* @setup_pi_matrix: setup PI matrix of the PSE controller
- * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not.
- * May also return negative errno.
+ * @pi_get_admin_state: Get the operational state of the PSE PI. This
+ *                      callback is mandatory.
+ * @pi_get_pw_status: Get the power detection status of the PSE PI. This
+ *                    callback is mandatory.
+ * @pi_get_ext_state: Get the extended state of the PSE PI.
+ * @pi_get_pw_class: Get the power class of the PSE PI.
+ * @pi_get_actual_pw: Get actual power of the PSE PI in mW.
* @pi_enable: Configure the PSE PI as enabled.
* @pi_disable: Configure the PSE PI as disabled.
* @pi_get_voltage: Return voltage similarly to get_voltage regulator
- * callback.
- * @pi_get_current_limit: Get the configured current limit similarly to
- * get_current_limit regulator callback.
- * @pi_set_current_limit: Configure the current limit similarly to
- * set_current_limit regulator callback.
- * Should not return an error in case of MAX_PI_CURRENT
- * current value set.
+ * callback in uV.
+ * @pi_get_pw_limit: Get the configured power limit of the PSE PI in mW.
+ * @pi_set_pw_limit: Configure the power limit of the PSE PI in mW.
+ * @pi_get_pw_limit_ranges: Get the supported power limit configuration
+ * range. The driver is in charge of the memory
+ * allocation and should return the number of
+ * ranges.
*/
struct pse_controller_ops {
- int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
- unsigned long id, struct netlink_ext_ack *extack,
- struct pse_control_status *status);
int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
- int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_admin_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state);
+ int (*pi_get_pw_status)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status);
+ int (*pi_get_ext_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info);
+ int (*pi_get_pw_class)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_actual_pw)(struct pse_controller_dev *pcdev, int id);
int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
- int (*pi_get_current_limit)(struct pse_controller_dev *pcdev,
- int id);
- int (*pi_set_current_limit)(struct pse_controller_dev *pcdev,
- int id, int max_uA);
+ int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev,
+ int id);
+ int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev,
+ int id, int max_mW);
+ int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges);
};
struct module;
struct device_node;
struct of_phandle_args;
struct pse_control;
+struct ethtool_pse_control_status;
/* PSE PI pairset pinout can either be Alternative A or Alternative B */
enum pse_pi_pairset_pinout {
@@ -177,15 +255,13 @@ void pse_control_put(struct pse_control *psec);
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status);
+ struct ethtool_pse_control_status *status);
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config);
int pse_ethtool_set_pw_limit(struct pse_control *psec,
struct netlink_ext_ack *extack,
const unsigned int pw_limit);
-int pse_ethtool_get_pw_limit(struct pse_control *psec,
- struct netlink_ext_ack *extack);
bool pse_has_podl(struct pse_control *psec);
bool pse_has_c33(struct pse_control *psec);
@@ -203,7 +279,7 @@ static inline void pse_control_put(struct pse_control *psec)
static inline int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
return -EOPNOTSUPP;
}
@@ -222,12 +298,6 @@ static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
return -EOPNOTSUPP;
}
-static inline int pse_ethtool_get_pw_limit(struct pse_control *psec,
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
static inline bool pse_has_podl(struct pse_control *psec)
{
return false;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index c892d22ce0a7..0d68d09bedd1 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -307,7 +307,7 @@ static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
*
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * Returns: a valid pointer on success or PTR_ERR on failure. If PHC
* support is missing at the configuration level, this function
* returns NULL, and drivers are expected to gracefully handle that
* case separately.
@@ -445,7 +445,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
* @hwtstamp: timestamp
* @vclock_index: phc index of ptp vclock.
*
- * Returns converted timestamp, or 0 on error.
+ * Returns: converted timestamp, or 0 on error.
*/
ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
#else
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index fd037c127bb0..551329220e4f 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -615,15 +615,14 @@ static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
- * In particular if you consume ring in interrupt or BH context, you must
- * disable interrupts/BH when doing so.
+ * In particular, if you consume the ring in BH context, you must
+ * disable BH when doing so.
*/
-static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
- unsigned int nrings,
- int size,
- gfp_t gfp, void (*destroy)(void *))
+static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
+ unsigned int nrings,
+ int size, gfp_t gfp,
+ void (*destroy)(void *))
{
- unsigned long flags;
void ***queues;
int i;
@@ -638,12 +637,12 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock_bh(&(rings[i])->consumer_lock);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
- spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
+ spin_unlock_bh(&(rings[i])->consumer_lock);
}
for (i = 0; i < nrings; ++i)
@@ -662,8 +661,8 @@ nomem:
noqueues:
return -ENOMEM;
}
-#define ptr_ring_resize_multiple(...) \
- alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__))
+#define ptr_ring_resize_multiple_bh(...) \
+ alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 997b34197385..6816e4c5f3f0 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -241,7 +241,7 @@ bool rfkill_soft_blocked(struct rfkill *rfkill);
* rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that corresponds to the name.
+ * Returns: enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 14b88f551920..4bc2ee0b10b0 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -78,7 +78,7 @@ static inline bool lockdep_rtnl_is_held(void)
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit
+ * Return: the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
@@ -102,6 +102,7 @@ void __rtnl_net_unlock(struct net *net);
void rtnl_net_lock(struct net *net);
void rtnl_net_unlock(struct net *net);
int rtnl_net_trylock(struct net *net);
+int rtnl_net_lock_killable(struct net *net);
int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b);
bool rtnl_net_is_locked(struct net *net);
@@ -138,6 +139,11 @@ static inline int rtnl_net_trylock(struct net *net)
return rtnl_trylock();
}
+static inline int rtnl_net_lock_killable(struct net *net)
+{
+ return rtnl_lock_killable();
+}
+
static inline void ASSERT_RTNL_NET(struct net *net)
{
ASSERT_RTNL();
@@ -178,6 +184,12 @@ void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
+/* Shared by rtnl_fdb_dump() and various ndo_fdb_dump() helpers. */
+struct ndo_fdb_dump_context {
+ unsigned long ifindex;
+ unsigned long fdb_idx;
+};
+
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 926496c9cc9c..bf178238a308 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -199,17 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
-static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,
- int nrings, unsigned int size,
- gfp_t gfp)
+static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
+ int nrings,
+ unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
- return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,
- nrings, size, gfp,
- __skb_array_destroy_skb);
+ return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
+ nrings, size, gfp,
+ __skb_array_destroy_skb);
}
-#define skb_array_resize_multiple(...) \
- alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__))
+#define skb_array_resize_multiple_bh(...) \
+ alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void skb_array_cleanup(struct skb_array *a)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 58009fa66102..bb2b751d274a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -608,11 +608,19 @@ struct skb_shared_info {
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
- unsigned int xdp_frags_size;
- /* Intermediate layers must ensure that destructor_arg
- * remains valid until skb destructor */
- void * destructor_arg;
+ union {
+ struct {
+ u32 xdp_frags_size;
+ u32 xdp_frags_truesize;
+ };
+
+ /*
+ * Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor.
+ */
+ void *destructor_arg;
+ };
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1134,7 +1142,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
* skb_dst - returns skb dst_entry
* @skb: buffer
*
- * Returns skb dst_entry, regardless of reference taken or not.
+ * Returns: skb dst_entry, regardless of reference taken or not.
*/
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
@@ -1222,7 +1230,7 @@ static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
- * Returns true if we can free the skb.
+ * Returns: true if we can free the skb.
*/
static inline bool skb_unref(struct sk_buff *skb)
{
@@ -1344,7 +1352,7 @@ struct sk_buff_fclones {
* @sk: socket
* @skb: buffer
*
- * Returns true if skb is a fast clone, and its clone is not freed.
+ * Returns: true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
* so we also check that didn't happen.
*/
@@ -3516,7 +3524,7 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
* A page shouldn't be considered for reusing/recycling if it was allocated
* under memory pressure or at a distant memory node.
*
- * Returns false if this page should be returned to page allocator, true
+ * Returns: false if this page should be returned to page allocator, true
* otherwise.
*/
static inline bool dev_page_is_reusable(const struct page *page)
@@ -3627,13 +3635,13 @@ static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. The page must already
+ * Returns: the address of the data within @frag. The page must already
* be mapped.
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
@@ -3648,7 +3656,7 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
* skb_frag_address_safe - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. Checks that the page
+ * Returns: the address of the data within @frag. Checks that the page
* is mapped and returns %NULL otherwise.
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
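
The two accessors differ only in how they treat a frag page that is not kernel-mapped: skb_frag_address() assumes the mapping exists, skb_frag_address_safe() reports its absence with %NULL. A hedged sketch using the defensive variant (my_copy_frag is an invented helper):

	static int my_copy_frag(void *buf, const skb_frag_t *frag)
	{
		/* NULL here means a highmem page with no kernel mapping. */
		void *va = skb_frag_address_safe(frag);

		if (!va)
			return -EFAULT;

		memcpy(buf, va, skb_frag_size(frag));
		return 0;
	}
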
@@ -3674,7 +3682,7 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto,
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
- * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * __skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
* @frag: the paged fragment to map
* @offset: the offset within the fragment (starting at the
@@ -3684,15 +3692,36 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
*
 * Maps the page associated with @frag to @dev.
*/
-static inline dma_addr_t skb_frag_dma_map(struct device *dev,
- const skb_frag_t *frag,
- size_t offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
{
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
}
+#define skb_frag_dma_map(dev, frag, ...) \
+ CONCATENATE(_skb_frag_dma_map, \
+ COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__)
+
+#define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \
+ const skb_frag_t *uf = (frag); \
+ size_t uo = (offset); \
+ \
+ __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \
+ DMA_TO_DEVICE); \
+})
+#define _skb_frag_dma_map1(dev, frag, offset) \
+ __skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \
+ __UNIQUE_ID(offset_))
+#define _skb_frag_dma_map0(dev, frag) \
+ _skb_frag_dma_map1(dev, frag, 0)
+#define _skb_frag_dma_map2(dev, frag, offset, size) \
+ __skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE)
+#define _skb_frag_dma_map3(dev, frag, offset, size, dir) \
+ __skb_frag_dma_map(dev, frag, offset, size, dir)
+
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask)
{
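
The CONCATENATE/COUNT_ARGS pair (from linux/args.h) turns skb_frag_dma_map() into an overload set: the macro name is completed with the number of trailing arguments, and the lower-arity forms fill in the defaults (offset 0, the remainder of the frag, DMA_TO_DEVICE). The __UNIQUE_ID temporaries in the one-argument form exist so @frag and @offset are evaluated exactly once even though each appears twice in the expansion. A sketch of how the call sites resolve (dev, frag and len are placeholders):

	dma_addr_t a = skb_frag_dma_map(dev, frag);
	/* -> __skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
	 *			  DMA_TO_DEVICE) */

	dma_addr_t b = skb_frag_dma_map(dev, frag, 16);
	/* -> offset 16, size skb_frag_size(frag) - 16, DMA_TO_DEVICE */

	dma_addr_t c = skb_frag_dma_map(dev, frag, 0, len);
	/* -> explicit size, direction still defaults to DMA_TO_DEVICE */

	dma_addr_t d = skb_frag_dma_map(dev, frag, 0, len, DMA_FROM_DEVICE);
	/* -> fully explicit; equivalent to the old five-argument call */
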
@@ -3890,7 +3919,7 @@ static inline int skb_linearize(struct sk_buff *skb)
* skb_has_shared_frag - can any frag be overwritten
* @skb: buffer to test
*
- * Return true if the skb has at least one frag that might be modified
+ * Return: true if the skb has at least one frag that might be modified
* by an external entity (as in vmsplice()/sendfile())
*/
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
@@ -4612,7 +4641,7 @@ static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
/* Check if we need to perform checksum complete validation.
*
- * Returns true if checksum complete is needed, false otherwise
+ * Returns: true if checksum complete is needed, false otherwise
* (either checksum is unnecessary or zero checksum is allowed).
*/
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d79ff252cfdc..c9878a612e53 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -33,7 +33,9 @@
#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
-#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */
+#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */
+#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */
/* MTL algorithms identifiers */
#define MTL_TX_ALGORITHM_WRR 0x0
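
The two new values extend the CSR-clock table so MDC stays at or below 2.5 MHz on platforms whose CSR clock sits in the 300-500 MHz or 500-800 MHz band. A hedged sketch of the band selection the defines imply (my_pick_csr is an invented helper; the lower bands' defines, STMMAC_CSR_60_100M and STMMAC_CSR_100_150M, sit just above this hunk in the header):

	static u32 my_pick_csr(unsigned long csr_rate)
	{
		if (csr_rate < 35000000)
			return STMMAC_CSR_20_35M;	/* MDC = clk_csr_i/16  */
		if (csr_rate < 60000000)
			return STMMAC_CSR_35_60M;	/* MDC = clk_csr_i/26  */
		if (csr_rate < 100000000)
			return STMMAC_CSR_60_100M;	/* MDC = clk_csr_i/42  */
		if (csr_rate < 150000000)
			return STMMAC_CSR_100_150M;	/* MDC = clk_csr_i/62  */
		if (csr_rate < 250000000)
			return STMMAC_CSR_150_250M;	/* MDC = clk_csr_i/102 */
		if (csr_rate < 300000000)
			return STMMAC_CSR_250_300M;	/* MDC = clk_csr_i/124 */
		if (csr_rate < 500000000)
			return STMMAC_CSR_300_500M;	/* MDC = clk_csr_i/204 */
		return STMMAC_CSR_500_800M;		/* MDC = clk_csr_i/324 */
	}
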
@@ -250,8 +252,8 @@ struct plat_stmmacenet_data {
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
- unsigned int clk_ptp_rate;
- unsigned int clk_ref_rate;
+ unsigned long clk_ptp_rate;
+ unsigned long clk_ref_rate;
unsigned int mult_fact_100ns;
s32 ptp_max_adj;
u32 cdc_error_adj;
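
Widening the rate fields to unsigned long matches the clk API, where rates are produced as unsigned long, so the usual assignment no longer narrows implicitly. A one-line sketch of the typical producer:

	/* clk_get_rate() returns unsigned long; the field now matches. */
	plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
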
@@ -263,7 +265,7 @@ struct plat_stmmacenet_data {
int mac_port_sel_speed;
int has_xgmac;
u8 vlan_fail_q;
- unsigned int eee_usecs_rate;
+ unsigned long eee_usecs_rate;
struct pci_dev *pdev;
int int_snapshot_num;
int msi_mac_vec;
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 79c781875c09..a4d6cc0c9f68 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -97,7 +97,7 @@ struct wwan_port_caps {
*
* This function must be balanced with a call to wwan_remove_port().
*
- * Returns a valid pointer to wwan_port on success or PTR_ERR on failure
+ * Returns: a valid pointer to wwan_port on success or PTR_ERR on failure
*/
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,