author    | Johannes Berg <johannes.berg@intel.com> | 2017-06-08 15:14:40 +0300
committer | Johannes Berg <johannes.berg@intel.com> | 2017-06-08 15:14:45 +0300
commit    | a43e61842ec55baa486d60eed2a19af67ba78b9f (patch)
tree      | 6c3c93f19e3933273b73b9edd16edc146ab18527 /include
parent    | 5d473fedd17ae3a9f92fb35551e307d01459ea6a (diff)
parent    | 50dffe7fad6c156c2928e45c19ff7b86eb951f4c (diff)
download  | linux-a43e61842ec55baa486d60eed2a19af67ba78b9f.tar.xz
Merge remote-tracking branch 'net-next/master' into mac80211-next
This brings in commit 7a7c0a6438b8 ("mac80211: fix TX aggregation
start/stop callback race") to allow the follow-up cleanup.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'include')
110 files changed, 2093 insertions, 485 deletions
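The first hunk below adds a DisplayPort sink/branch descriptor and quirk API to drm_dp_helper.h. As a rough usage sketch only (the helper name, the aux pointer, and the handling of a failed DPCD read are assumptions for illustration, not part of this commit), a display driver could consume it like this:

    #include <drm/drm_dp_helper.h>

    /* Hypothetical helper: decide whether Mvid/Nvid must be limited to 16 bits. */
    static bool example_dp_needs_limited_m_n(struct drm_dp_aux *aux)
    {
    	struct drm_dp_desc desc;

    	/* Read the DPCD identification (0x400 for a sink) and cache its quirks. */
    	if (drm_dp_read_desc(aux, &desc, false /* is_branch */))
    		return false;	/* assumption: treat a failed read as "no quirk" */

    	return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);
    }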
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index c0bd0d7651a9..bb837310c07e 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux); int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); int drm_dp_stop_crc(struct drm_dp_aux *aux); +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. + */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; + u32 quirks; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); + +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs, try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_LIMITED_M_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. + */ + DP_DPCD_QUIRK_LIMITED_M_N, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device decriptor filled by drm_dp_read_desc() + * @quirk: Quirk to query for + * + * Return true if DP device identified by @desc has @quirk. + */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +{ + return desc->quirks & BIT(quirk); +} + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 97b8d3728b31..ef718586321c 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -195,7 +195,10 @@ struct vgic_dist { /* either a GICv2 CPU interface */ gpa_t vgic_cpu_base; /* or a number of GICv3 redistributor regions */ - gpa_t vgic_redist_base; + struct { + gpa_t vgic_redist_base; + gpa_t vgic_redist_free_offset; + }; }; /* distributor enabled */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h new file mode 100644 index 000000000000..c893b9520a67 --- /dev/null +++ b/include/linux/avf/virtchnl.h @@ -0,0 +1,701 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. + */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_NOT_SUPPORTED = -64, +}; + +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
+ */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, +}; + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures.*/ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. 
+ */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF offload flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 + +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u32 pad1; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. 
*/ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. 
+ */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. 
+ */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_ERR_PARAM; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c47aa248c640..fcd641032f8d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); bool blk_mq_queue_stopped(struct request_queue *q); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6bb38d76faf4..c32bace66d3d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -46,6 +46,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; u32 pages; + u32 id; struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; @@ -171,6 +172,8 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + u32 stack_depth; + u32 id; struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index aa2e19182d99..51c5bd64bd00 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -3,6 +3,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/string.h> + #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG /* @@ -12,12 +14,10 @@ */ # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) -extern const char *ceph_file_part(const char *s, int len); # define dout(fmt, ...) \ pr_debug("%.*s %12.12s:%-4d : " fmt, \ 8 - (int)sizeof(KBUILD_MODNAME), " ", \ - ceph_file_part(__FILE__, sizeof(__FILE__)), \ - __LINE__, ##__VA_ARGS__) + kbasename(__FILE__), __LINE__, ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ # define dout(fmt, ...) 
do { \ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 21745946cae1..ec47101cb1bf 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -48,6 +48,7 @@ enum { CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ + CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed2573e149fa..710a005c6b7a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) } /** + * css_is_dying - test whether the specified css is dying + * @css: target css + * + * Test whether @css is in the process of offlining or already offline. In + * most cases, ->css_online() and ->css_offline() callbacks should be + * enough; however, the actual offline operations are RCU delayed and this + * test returns %true also when @css is scheduled to be offlined. + * + * This is useful, for example, when the use case requires synchronous + * behavior with respect to cgroup removal. cgroup removal schedules css + * offlining but the css can seem alive while the operation is being + * delayed. If the delay affects user visible semantics, this test can be + * used to resolve the situation. + */ +static inline bool css_is_dying(struct cgroup_subsys_state *css) +{ + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); +} + +/** * css_put - put a css reference * @css: target css * diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index de179993e039..ea9126006a69 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -15,3 +15,10 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +/* + * GCC does not warn about unused static inline functions for + * -Wunused-function. This turns out to avoid the need for complex #ifdef + * directives. Suppress the warning in clang as well. 
+ */ +#define inline inline __attribute__((unused)) diff --git a/include/linux/dax.h b/include/linux/dax.h index 00ebac854bb7..5ec1f6c47716 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -18,6 +18,20 @@ struct dax_operations { void **, pfn_t *); }; +#if IS_ENABLED(CONFIG_DAX) +struct dax_device *dax_get_by_host(const char *host); +void put_dax(struct dax_device *dax_dev); +#else +static inline struct dax_device *dax_get_by_host(const char *host) +{ + return NULL; +} + +static inline void put_dax(struct dax_device *dax_dev) +{ +} +#endif + int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) int __bdev_dax_supported(struct super_block *sb, int blocksize); @@ -25,23 +39,29 @@ static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { return __bdev_dax_supported(sb, blocksize); } + +static inline struct dax_device *fs_dax_get_by_host(const char *host) +{ + return dax_get_by_host(host); +} + +static inline void fs_put_dax(struct dax_device *dax_dev) +{ + put_dax(dax_dev); +} + #else static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { return -EOPNOTSUPP; } -#endif -#if IS_ENABLED(CONFIG_DAX) -struct dax_device *dax_get_by_host(const char *host); -void put_dax(struct dax_device *dax_dev); -#else -static inline struct dax_device *dax_get_by_host(const char *host) +static inline struct dax_device *fs_dax_get_by_host(const char *host) { return NULL; } -static inline void put_dax(struct dax_device *dax_dev) +static inline void fs_put_dax(struct dax_device *dax_dev) { } #endif diff --git a/include/linux/filter.h b/include/linux/filter.h index 56197f82af45..1fa26dc562ce 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -57,6 +57,9 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* unused opcode to mark special call to bpf_tail_call() helper */ +#define BPF_TAIL_CALL 0xf0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -66,8 +69,6 @@ struct bpf_prog_aux; /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 -#define BPF_TAG_SIZE 8 - /* Helper macros for filter block array initializers. 
*/ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ @@ -272,6 +273,16 @@ struct bpf_prog_aux; .off = OFF, \ .imm = IMM }) +/* Unconditional jumps, goto pc + off16 */ + +#define BPF_JMP_A(OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = 0 }) + /* Function call */ #define BPF_EMIT_CALL(FUNC) \ @@ -419,6 +430,7 @@ struct bpf_prog { kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2b1a44f5bdb6..a89d37e8b387 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -41,7 +41,7 @@ struct vm_area_struct; #define ___GFP_WRITE 0x800000u #define ___GFP_KSWAPD_RECLAIM 0x1000000u #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x4000000u +#define ___GFP_NOLOCKDEP 0x2000000u #else #define ___GFP_NOLOCKDEP 0 #endif diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index c0d712d22b07..f738d50cc17d 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -56,7 +56,14 @@ struct gpiod_lookup_table { .flags = _flags, \ } +#ifdef CONFIG_GPIOLIB void gpiod_add_lookup_table(struct gpiod_lookup_table *table); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); +#else +static inline +void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} +static inline +void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} +#endif #endif /* __LINUX_GPIO_MACHINE_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 0c16866a7aac..3cd18ac0697f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -62,6 +62,7 @@ int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +bool br_multicast_enabled(const struct net_device *dev); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -78,6 +79,19 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, { return false; } +static inline bool br_multicast_enabled(const struct net_device *dev) +{ + return false; +} +#endif + +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) +bool br_vlan_enabled(const struct net_device *dev); +#else +static inline bool br_vlan_enabled(const struct net_device *dev) +{ + return false; +} #endif #endif diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3482c3c2037d..4837157da0dc 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -3,6 +3,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); +struct skb_array *tap_get_skb_array(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -12,6 +13,10 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tap_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TAP */ #include <net/sock.h> diff --git a/include/linux/if_team.h b/include/linux/if_team.h index c05216a8fbac..30294603526f 100644 --- a/include/linux/if_team.h +++ 
b/include/linux/if_team.h @@ -298,4 +298,6 @@ extern void team_mode_unregister(const struct team_mode *mode); #define TEAM_DEFAULT_NUM_TX_QUEUES 16 #define TEAM_DEFAULT_NUM_RX_QUEUES 16 +#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind) + #endif /* _LINUX_IF_TEAM_H_ */ diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index ed6da2e6df90..bf9bdf42d577 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -19,6 +19,7 @@ #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); +struct skb_array *tun_get_skb_array(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -28,5 +29,9 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } +static inline struct skb_array *tun_get_skb_array(struct file *f) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8d5fcd6284ce..283dc2f5364d 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, netdev_features_t features) { - if (skb_vlan_tagged_multi(skb)) - features = netdev_intersect_features(features, - NETIF_F_SG | - NETIF_F_HIGHDMA | - NETIF_F_FRAGLIST | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); + if (skb_vlan_tagged_multi(skb)) { + /* In the case of multi-tagged packets, use a direct mask + * instead of using netdev_interesect_features(), to make + * sure that only devices supporting NETIF_F_HW_CSUM will + * have checksum offloading support. + */ + features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + } return features; } diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 36872fbb815d..734377ad42e9 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +#ifndef __jiffy_arch_data +#define __jiffy_arch_data +#endif + /* * The 64-bit value is not atomic - you MUST NOT read it * without sampling the sequence number in jiffies_lock. * get_jiffies_64() will do this for you as appropriate. 
*/ extern u64 __cacheline_aligned_in_smp jiffies_64; -extern unsigned long volatile __cacheline_aligned_in_smp jiffies; +extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 30f90c1a0aaf..541df0b5b815 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif +extern void wait_for_kprobe_optimizer(void); +#else +static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ #ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 4ce24a376262..8098695e5d8d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) } #endif +extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, + phys_addr_t end_addr); #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { return 0; } +static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, + phys_addr_t end_addr) +{ + return 0; +} + #endif /* CONFIG_HAVE_MEMBLOCK */ #endif /* __KERNEL__ */ diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index f541da68d1e7..472fa4d4ea62 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,8 @@ #define PHY_ID_KSZ8795 0x00221550 +#define PHY_ID_KSZ9477 0x00221631 + /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 diff --git a/include/linux/mii.h b/include/linux/mii.h index 1629a0c32679..e870bfa6abfe 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -31,7 +31,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); -extern int mii_ethtool_get_link_ksettings( +extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_set_link_ksettings( diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index b4ee8f62ce8d..8e2828d48d7f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -470,6 +470,7 @@ struct mlx4_update_qp_params { u16 rate_val; }; +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index dd9a263ed368..b26a478930eb 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -300,6 +300,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, + + MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, }; enum { @@ -787,8 +789,14 @@ enum { }; enum { - CQE_RSS_HTYPE_IP = 0x3 << 6, - CQE_RSS_HTYPE_L4 = 0x3 << 2, + CQE_RSS_HTYPE_IP = 0x3 << 2, + /* cqe->rss_hash_type[3:2] - IP destination selected for hash + * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) + */ + CQE_RSS_HTYPE_L4 = 0x3 << 6, + 
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash + * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI + */ }; enum { @@ -967,6 +975,7 @@ enum mlx5_cap_type { MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, + MLX5_CAP_FPGA, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1088,6 +1097,9 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) +#define MLX5_CAP_FPGA(mdev, cap) \ + MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bcdf739ee41a..6ea2f5734e37 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -108,6 +108,8 @@ enum { MLX5_REG_QTCT = 0x400a, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, + MLX5_REG_FPGA_CAP = 0x4022, + MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -761,6 +763,9 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; +#ifdef CONFIG_MLX5_FPGA + struct mlx5_fpga_device *fpga; +#endif #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif @@ -787,7 +792,12 @@ enum { typedef void (*mlx5_cmd_cbk_t)(int status, void *context); +enum { + MLX5_CMD_ENT_STATE_PENDING_COMP, +}; + struct mlx5_cmd_work_ent { + unsigned long state; struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; void *uout; @@ -890,11 +900,6 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } -static inline void *mlx5_vzalloc(unsigned long size) -{ - return kvzalloc(size, GFP_KERNEL); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; @@ -920,6 +925,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_trigger_health_work(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); @@ -976,7 +982,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 32de0724b400..56e96f6a0a45 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -32,6 +32,8 @@ #ifndef MLX5_IFC_H #define MLX5_IFC_H +#include "mlx5_ifc_fpga.h" + enum { MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, @@ -56,7 +58,8 @@ enum { MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, - MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb + 
MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, + MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, }; enum { @@ -766,6 +769,12 @@ enum { MLX5_CAP_PORT_TYPE_ETH = 0x1, }; +enum { + MLX5_CAP_UMR_FENCE_STRONG = 0x0, + MLX5_CAP_UMR_FENCE_SMALL = 0x1, + MLX5_CAP_UMR_FENCE_NONE = 0x2, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x80]; @@ -854,7 +863,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_tc[0x4]; u8 reserved_at_1d0[0x1]; u8 dcbx[0x1]; - u8 reserved_at_1d2[0x4]; + u8 reserved_at_1d2[0x3]; + u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; @@ -875,7 +885,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_202[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_205[0xa]; + u8 reserved_at_205[0x5]; + u8 umr_fence[0x2]; + u8 reserved_at_20c[0x3]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; @@ -2186,6 +2198,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_fpga_cap_bits fpga_cap; u8 reserved_at_0[0x8000]; }; @@ -8182,6 +8195,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_sltp_reg_bits sltp_reg; struct mlx5_ifc_mtpps_reg_bits mtpps_reg; struct mlx5_ifc_mtppse_reg_bits mtppse_reg; + struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; + struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h new file mode 100644 index 000000000000..0032d10ac6cf --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef MLX5_IFC_FPGA_H +#define MLX5_IFC_FPGA_H + +struct mlx5_ifc_fpga_shell_caps_bits { + u8 max_num_qps[0x10]; + u8 reserved_at_10[0x8]; + u8 total_rcv_credits[0x8]; + + u8 reserved_at_20[0xe]; + u8 qp_type[0x2]; + u8 reserved_at_30[0x5]; + u8 rae[0x1]; + u8 rwe[0x1]; + u8 rre[0x1]; + u8 reserved_at_38[0x4]; + u8 dc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_at_40[0x1a]; + u8 log_ddr_size[0x6]; + + u8 max_fpga_qp_msg_size[0x20]; + + u8 reserved_at_80[0x180]; +}; + +struct mlx5_ifc_fpga_cap_bits { + u8 fpga_id[0x8]; + u8 fpga_device[0x18]; + + u8 register_file_ver[0x20]; + + u8 fpga_ctrl_modify[0x1]; + u8 reserved_at_41[0x5]; + u8 access_reg_query_mode[0x2]; + u8 reserved_at_48[0x6]; + u8 access_reg_modify_mode[0x2]; + u8 reserved_at_50[0x10]; + + u8 reserved_at_60[0x20]; + + u8 image_version[0x20]; + + u8 image_date[0x20]; + + u8 image_time[0x20]; + + u8 shell_version[0x20]; + + u8 reserved_at_100[0x80]; + + struct mlx5_ifc_fpga_shell_caps_bits shell_caps; + + u8 reserved_at_380[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 sandbox_product_version[0x10]; + u8 sandbox_product_id[0x10]; + + u8 sandbox_basic_caps[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 sandbox_extended_caps_len[0x10]; + + u8 sandbox_extended_caps_addr[0x40]; + + u8 fpga_ddr_start_addr[0x40]; + + u8 fpga_cr_space_start_addr[0x40]; + + u8 fpga_ddr_size[0x20]; + + u8 fpga_cr_space_size[0x20]; + + u8 reserved_at_500[0x300]; +}; + +struct mlx5_ifc_fpga_ctrl_bits { + u8 reserved_at_0[0x8]; + u8 operation[0x8]; + u8 reserved_at_10[0x8]; + u8 status[0x8]; + + u8 reserved_at_20[0x8]; + u8 flash_select_admin[0x8]; + u8 reserved_at_30[0x8]; + u8 flash_select_oper[0x8]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, + MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, + MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, + MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, + MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, + MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, + MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, +}; + +struct mlx5_ifc_fpga_error_event_bits { + u8 reserved_at_0[0x40]; + + u8 reserved_at_40[0x18]; + u8 syndrome[0x8]; + + u8 reserved_at_60[0x80]; +}; + +#endif /* MLX5_IFC_FPGA_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 7cb17c6b97de..b892e95d4929 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ +static inline int vm_fault_to_errno(int vm_fault, int foll_flags) +{ + if (vm_fault & VM_FAULT_OOM) + return -ENOMEM; + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -EFAULT; + return 0; +} + typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ebaccd4e7d8c..ef6a13b7bd3e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -678,6 +678,7 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. 
*/ unsigned long first_deferred_pfn; + unsigned long static_init_size; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 566fda587fcf..3f74ef2281e8 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -467,6 +467,7 @@ enum dmi_field { DMI_PRODUCT_VERSION, DMI_PRODUCT_SERIAL, DMI_PRODUCT_UUID, + DMI_PRODUCT_FAMILY, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f39d27decf4..c50c9218e31e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1824,7 +1824,7 @@ struct net_device { #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif - unsigned long tx_queue_len; + unsigned int tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; @@ -2456,6 +2456,7 @@ static inline int dev_recursion_level(void) struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); @@ -2573,9 +2574,7 @@ static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ - if (__ret) \ - __skb_mark_checksum_bad(skb); \ - else \ + if (!__ret) \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) @@ -3931,6 +3930,10 @@ void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); +int skb_crc32c_csum_help(struct sk_buff *skb); +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features); + struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index be378cf47fcc..b3044c2c62cb 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m, int xt_target_to_user(const struct xt_entry_target *t, struct xt_entry_target __user *u); int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size); + int usersize, int size, int aligned_size); void *xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a30efb437e6d..e0cbf17af780 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb, /* True if the target is not a standard target */ #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) +static inline bool ebt_invalid_target(int target) +{ + return (target < -NUM_STANDARD_TARGETS || target >= 0); +} + #endif diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5fff5ba5964e..8664fd26eb5d 100644 --- a/include/linux/netlink.h +++ 
b/include/linux/netlink.h @@ -97,6 +97,21 @@ struct netlink_ext_ack { #define NL_SET_ERR_MSG_MOD(extack, msg) \ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg) +#define NL_SET_BAD_ATTR(extack, attr) do { \ + if ((extack)) \ + (extack)->bad_attr = (attr); \ +} while (0) + +#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ + static const char __msg[] = (msg); \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (__extack) { \ + __extack->_msg = __msg; \ + __extack->bad_attr = (attr); \ + } \ +} while (0) + extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 0db37158a61d..6c8c5d8041b7 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -27,8 +27,8 @@ /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ #define FC_PORT_ROLE_NVME_INITIATOR 0x10 -#define FC_PORT_ROLE_NVME_TARGET 0x11 -#define FC_PORT_ROLE_NVME_DISCOVERY 0x12 +#define FC_PORT_ROLE_NVME_TARGET 0x20 +#define FC_PORT_ROLE_NVME_DISCOVERY 0x40 /** @@ -642,15 +642,7 @@ enum { * sequence in one LLDD operation. Errors during Data * sequence transmit must not allow RSP sequence to be sent. */ - NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), - /* Bit 1: When 0, the LLDD will deliver FCP CMD - * on the CPU it should be affinitized to. Thus work will - * be scheduled on the cpu received on. When 1, the LLDD - * may not deliver the CMD on the CPU it should be worked - * on. The transport should pick a cpu to schedule the work - * on. - */ - NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2), + NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), /* Bit 2: When 0, the LLDD is calling the cmd rcv handler * in a non-isr context, allowing the transport to finish * op completion in the calling context. When 1, the LLDD @@ -658,7 +650,7 @@ enum { * requiring the transport to transition to a workqueue * for op completion. */ - NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3), + NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), /* Bit 3: When 0, the LLDD is calling the op done handler * in a non-isr context, allowing the transport to finish * op completion in the calling context. 
When 1, the LLDD diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index ec6b11deb773..1e0deb8e8494 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -8,7 +8,7 @@ #include <linux/ioport.h> #include <linux/of.h> -typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *); +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); /* * Workarounds only applied to 32bit powermac machines diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index dc8224ae28d5..e0d1946270f3 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent); +extern int of_platform_device_destroy(struct device *dev, void *data); extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); diff --git a/include/linux/pci.h b/include/linux/pci.h index 33c2b0b77429..8039f9f0ca05 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -183,6 +183,11 @@ enum pci_dev_flags { PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), /* Do not use FLR even if device advertises PCI_AF_CAP */ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), + /* + * Resume before calling the driver's system suspend hooks, disabling + * the direct_complete optimization. + */ + PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), }; enum pci_irq_reroute_variant { @@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, const struct irq_affinity *aff_desc) { - if (min_vecs > 1) - return -EINVAL; - return 1; + if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) + return 1; + return -ENOSPC; } static inline void pci_free_irq_vectors(struct pci_dev *dev) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 24a635887f28..8fc5f0fada5e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -896,7 +896,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -extern u64 perf_event_read_local(struct perf_event *event); +int perf_event_read_local(struct perf_event *event, u64 *value); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1301,7 +1301,10 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event * { return ERR_PTR(-EINVAL); } -static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } +static inline int perf_event_read_local(struct perf_event *event, u64 *value) +{ + return -EINVAL; +} static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } diff --git a/include/linux/phy.h b/include/linux/phy.h index 54ef45823fc1..414242200a90 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -83,6 +83,9 @@ typedef enum { PHY_INTERFACE_MODE_1000BASEX, PHY_INTERFACE_MODE_2500BASEX, PHY_INTERFACE_MODE_RXAUI, + PHY_INTERFACE_MODE_XAUI, + /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ + PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -149,6 +152,10 @@ static inline const char *phy_modes(phy_interface_t interface) return 
"2500base-x"; case PHY_INTERFACE_MODE_RXAUI: return "rxaui"; + case PHY_INTERFACE_MODE_XAUI: + return "xaui"; + case PHY_INTERFACE_MODE_10GKR: + return "10gbase-kr"; default: return "unknown"; } @@ -363,6 +370,7 @@ struct phy_c45_device_ids { * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc. * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * link_timeout: The number of timer firings to wait before the @@ -399,6 +407,7 @@ struct phy_device { bool is_pseudo_fixed_link; bool has_fixups; bool suspended; + bool sysfs_links; enum phy_state state; @@ -716,14 +725,24 @@ static inline bool phy_is_internal(struct phy_device *phydev) } /** + * phy_interface_mode_is_rgmii - Convenience function for testing if a + * PHY interface mode is RGMII (all variants) + * @mode: the phy_interface_t enum + */ +static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) +{ + return mode >= PHY_INTERFACE_MODE_RGMII && + mode <= PHY_INTERFACE_MODE_RGMII_TXID; +}; + +/** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) * @phydev: the phy_device struct */ static inline bool phy_interface_is_rgmii(struct phy_device *phydev) { - return phydev->interface >= PHY_INTERFACE_MODE_RGMII && - phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; + return phy_interface_mode_is_rgmii(phydev->interface); }; /* @@ -792,6 +811,7 @@ int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); +int phy_restart_aneg(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { @@ -815,6 +835,8 @@ static inline const char *phydev_name(const struct phy_device *phydev) void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
__printf(2, 3); void phy_attached_info(struct phy_device *phydev); + +/* Clause 22 PHY */ int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); @@ -829,6 +851,16 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } + +/* Clause 45 PHY */ +int genphy_c45_restart_aneg(struct phy_device *phydev); +int genphy_c45_aneg_done(struct phy_device *phydev); +int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask); +int genphy_c45_read_lpa(struct phy_device *phydev); +int genphy_c45_read_pma(struct phy_device *phydev); +int genphy_c45_pma_setup_forced(struct phy_device *phydev); +int genphy_c45_an_disable_aneg(struct phy_device *phydev); + void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); @@ -842,7 +874,6 @@ void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_trigger_machine(struct phy_device *phydev, bool sync); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); -int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 279e3c5326e3..7620eb127cff 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -42,8 +42,6 @@ * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high * impedance to VDD). If the argument is != 0 pull-up is enabled, * if it is 0, pull-up is total, i.e. the pin is connected to VDD. - * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous - * input and output operations. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports * which are then pulled up with an external resistor. Setting this @@ -98,7 +96,6 @@ enum pin_config_param { PIN_CONFIG_BIAS_PULL_DOWN, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, PIN_CONFIG_BIAS_PULL_UP, - PIN_CONFIG_BIDIRECTIONAL, PIN_CONFIG_DRIVE_OPEN_DRAIN, PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h new file mode 100644 index 000000000000..84789ca634aa --- /dev/null +++ b/include/linux/platform_data/microchip-ksz.h @@ -0,0 +1,29 @@ +/* + * Microchip KSZ series switch platform data + * + * Copyright (C) 2017 + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
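The phy.h block above adds shared Clause 45 helpers (genphy_c45_restart_aneg(), genphy_c45_aneg_done(), genphy_c45_read_link(), genphy_c45_pma_setup_forced(), and friends) alongside the existing Clause 22 genphy routines. A rough sketch of the shape of a 10G PHY driver's config_aneg() built on them; a real driver would wrap vendor-specific setup around these calls, and the function name is invented for illustration:

#include <linux/phy.h>

static int example_c45_config_aneg(struct phy_device *phydev)
{
	/* Forced speed: program the PMA/PMD directly, no autonegotiation. */
	if (phydev->autoneg == AUTONEG_DISABLE)
		return genphy_c45_pma_setup_forced(phydev);

	/* Otherwise (re)start Clause 45 autonegotiation. */
	return genphy_c45_restart_aneg(phydev);
}

phy_interface_mode_is_rgmii(), added in the same phy.h diff, gives MAC drivers the equivalent of phy_interface_is_rgmii() when only a phy_interface_t value (for example one parsed from the device tree) is available.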
+ */ + +#ifndef __MICROCHIP_KSZ_H +#define __MICROCHIP_KSZ_H + +#include <linux/kernel.h> + +struct ksz_platform_data { + u32 chip_id; + u16 enabled_ports; +}; + +#endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6b2e0dd88569..d8c97ec8a8e6 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -278,6 +278,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i; + + for (i = 0; i < n; i++) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i] = ptr; + } + + return i; +} + /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -328,6 +344,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. @@ -403,6 +468,61 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) return 0; } +/* + * Return entries into ring. Destroy entries that don't fit. + * + * Note: this is expected to be a rare slow path operation. + * + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ +static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, + void (*destroy)(void *)) +{ + unsigned long flags; + int head; + + spin_lock_irqsave(&r->consumer_lock, flags); + spin_lock(&r->producer_lock); + + if (!r->size) + goto done; + + /* + * Clean out buffered entries (for simplicity). This way following code + * can test entries for NULL and if not assume they are valid. + */ + head = r->consumer_head - 1; + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + + /* + * Go over entries in batch, start moving head back and copy entries. + * Stop when we run into previously unconsumed entries. + */ + while (n) { + head = r->consumer_head - 1; + if (head < 0) + head = r->size - 1; + if (r->queue[head]) { + /* This batch entry will have to be destroyed. 
*/ + goto done; + } + r->queue[head] = batch[--n]; + r->consumer_tail = r->consumer_head = head; + } + +done: + /* Destroy all entries left in the batch. */ + while (n) + destroy(batch[--n]); + spin_unlock(&r->producer_lock); + spin_unlock_irqrestore(&r->consumer_lock, flags); +} + static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, int size, gfp_t gfp, void (*destroy)(void *)) diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 422bc2e4cb6a..ef3eb8bbfee4 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, - struct task_struct *new_parent); + struct task_struct *new_parent, + const struct cred *ptracer_cred); extern void __ptrace_unlink(struct task_struct *child); extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 @@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; - __ptrace_link(child, current->parent); + __ptrace_link(child, current->parent, current->ptracer_cred); if (child->ptrace & PT_SEIZED) task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); @@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) set_tsk_thread_flag(child, TIF_SIGPENDING); } + else + child->ptracer_cred = NULL; } /** diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index fbab6e0514f0..a567cbf8c5b4 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -96,12 +96,12 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 -#define MAX_NUM_LL2_RX_QUEUES 32 -#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 +#define MAX_NUM_LL2_RX_QUEUES 48 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 15 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 20 +#define FW_REVISION_VERSION 0 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -181,6 +181,14 @@ #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + /*****************/ /* DQ CONSTANTS */ /*****************/ @@ -457,7 +465,6 @@ #define PXP_BAR_DQ 1 /* PTT and GTT */ -#define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 #define PXP_NUM_GLOBAL_WINDOWS 243 #define PXP_GLOBAL_ENTRY_SIZE 4 @@ -482,6 +489,7 @@ #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define PXP_PF_ME_CONCRETE_ADDR 0x1fc +#define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 @@ -618,16 +626,21 @@ /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 -/******************/ -/* SDMs CONSTANTS */ -/******************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 -#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 
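ptr_ring.h above gains __ptr_ring_consume_batched() plus locked wrappers (ptr_ring_consume_batched() and its _irq, _any and _bh variants) and ptr_ring_unconsume(). The batched variants take the consumer lock once per burst instead of once per entry. A small sketch of draining a ring from BH context; it assumes the ring holds kmalloc'ed objects, which is purely illustrative:

#include <linux/kernel.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void example_drain_ring(struct ptr_ring *r)
{
	void *batch[16];
	int n, i;

	do {
		/* Grab up to 16 entries under one consumer_lock round trip. */
		n = ptr_ring_consume_batched_bh(r, batch, ARRAY_SIZE(batch));
		for (i = 0; i < n; i++)
			kfree(batch[i]);
	} while (n == ARRAY_SIZE(batch));
}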
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 +#define PRM_DMA_PAD_BYTES_NUM 2 +/*****************/ +/* SDMs CONSTANTS */ +/*****************/ + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/********************/ +/* Completion types */ +/********************/ #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 @@ -638,10 +651,11 @@ #define SDM_COMP_TYPE_INDICATE_ERROR 6 #define SDM_COMP_TYPE_RELEASE_THREAD 7 #define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 -/******************/ -/* PBF CONSTANTS */ -/******************/ +/*****************/ +/* PBF Constants */ +/*****************/ /* Number of PBF command queue lines. Each line is 32B. */ #define PBF_MAX_CMD_LINES 3328 @@ -861,7 +875,7 @@ enum db_dest { /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, - DPM_ROCE, + DPM_RDMA, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE @@ -884,8 +898,8 @@ struct db_l2_dpm_data { #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 -#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 -#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 +#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ @@ -931,31 +945,33 @@ struct db_pwm_addr { }; /* Parameters to RoCE firmware, passed in EDPM doorbell */ -struct db_roce_dpm_params { +struct db_rdma_dpm_params { __le32 params; -#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; /* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ -struct db_roce_dpm_data { +struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; - struct db_roce_dpm_params params; + struct db_rdma_dpm_params params; }; /* Igu interrupt command */ @@ -1026,6 +1042,42 @@ struct parsing_and_err_flags { #define 
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +struct parsing_err_flags { + __le16 flags; +#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + struct pb_context { __le32 crc[4]; }; @@ -1288,39 +1340,56 @@ struct tdif_task_context { struct timers_context { __le32 logical_client_0; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_1; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define 
TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 __le32 logical_client_2; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 __le32 host_expiration_fields; -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 -#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 -#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; + +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + #endif /* __COMMON_HSI__ */ #endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 34d93eb5bfba..cb06e6e368e1 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -75,7 +75,8 @@ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 947a635d04bb..12fc9e788eea 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -13,7 +13,6 @@ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) struct fcoe_abts_pkt { __le32 abts_rsp_fc_payload_lo; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 69949f8e354b..85e086cba639 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -75,25 +75,13 @@ #define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ -#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) -#define ISCSI_OPCODE_NOP_OUT ( \ - ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) -#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) -#define ISCSI_OPCODE_SCSI_CMD ( \ - ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) -#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) -#define ISCSI_OPCODE_TMF_REQUEST ( \ - ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) -#define ISCSI_OPCODE_LOGIN_REQUEST ( \ - ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 
0x40) -#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) -#define ISCSI_OPCODE_TEXT_REQUEST ( \ - ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_DATA_OUT (5) -#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) -#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ - ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_NOP_OUT (0) +#define ISCSI_OPCODE_SCSI_CMD (1) +#define ISCSI_OPCODE_TMF_REQUEST (2) +#define ISCSI_OPCODE_LOGIN_REQUEST (3) +#define ISCSI_OPCODE_TEXT_REQUEST (4) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ #define ISCSI_OPCODE_NOP_IN (0x20) @@ -172,17 +160,23 @@ struct iscsi_async_msg_hdr { struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 opcode; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 @@ -790,9 +784,9 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_PRS_ERRORS, + ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, @@ -1304,22 +1298,6 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_total_pdu_cnt; }; -struct iscsi_db_data { - u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 sq_prod; -}; - struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; @@ -1398,5 +1376,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 
+ u8 agg_flags; + __le16 sq_prod; +}; #endif /* __ISCSI_COMMON__ */ diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d66d16a559e1..0eef0a2b1901 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -47,8 +47,7 @@ struct qed_queue_start_common_params { /* Relative, but relevant only for PFs */ u8 stats_id; - /* These are always absolute */ - u16 sb; + struct qed_sb_info *p_sb; u8 sb_idx; }; @@ -74,6 +73,9 @@ struct qed_dev_eth_info { /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; + + /* Might depend on available resources [in case of VF] */ + bool xdp_supported; }; struct qed_update_vport_rss_params { diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index bd6bcb809415..1e015c50e6b8 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -24,6 +24,11 @@ struct qed_dev_fcoe_info { void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; + + u64 wwpn; + u64 wwnn; + + u8 num_cqs; }; struct qed_fcoe_params_offload { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index c70ac13a97e6..74f6b99754aa 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -156,6 +156,11 @@ struct qed_dcbx_get { struct qed_dcbx_admin_params local; }; +enum qed_nvm_images { + QED_NVM_IMAGE_ISCSI_CFG, + QED_NVM_IMAGE_FCOE_CFG, +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -180,6 +185,10 @@ struct qed_eth_pf_params { */ u16 num_cons; + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + /* To enable arfs, previous to HW-init a positive number needs to be * set [as filters require allocated searcher ILT memory]. * This will set the maximal number of configured steering-filters. @@ -328,6 +337,14 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; u8 mf_mode; @@ -337,12 +354,23 @@ struct qed_dev_info { bool wol_support; + /* MBI version */ + u32 mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + enum qed_dev_type dev_type; /* Output parameters for qede */ bool vxlan_enable; bool gre_enable; bool geneve_enable; + + u8 abs_pf_id; }; enum qed_sb_type { @@ -503,9 +531,7 @@ struct qed_common_ops { int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); - void (*set_id)(struct qed_dev *cdev, - char name[], - char ver_str[]); + void (*set_name) (struct qed_dev *cdev, char name[]); /* Client drivers need to make this call before slowpath_start. 
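The qed_if.h hunk above splits dev_info->mfw_rev (and the new mbi_version field) into byte-wide mask/offset pairs. A sketch of turning mfw_rev into a dotted version string with those definitions; printing the most significant byte first matches the usual major-to-minor convention but is an assumption here, as is the helper name:

#include <linux/kernel.h>
#include <linux/qed/qed_if.h>

static void example_mfw_version_str(u32 mfw_rev, char *buf, size_t len)
{
	snprintf(buf, len, "%d.%d.%d.%d",
		 (mfw_rev & QED_MFW_VERSION_3_MASK) >> QED_MFW_VERSION_3_OFFSET,
		 (mfw_rev & QED_MFW_VERSION_2_MASK) >> QED_MFW_VERSION_2_OFFSET,
		 (mfw_rev & QED_MFW_VERSION_1_MASK) >> QED_MFW_VERSION_1_OFFSET,
		 (mfw_rev & QED_MFW_VERSION_0_MASK) >> QED_MFW_VERSION_0_OFFSET);
}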
* PF params required for the call before slowpath_start is @@ -614,6 +640,19 @@ struct qed_common_ops { struct qed_chain *p_chain); /** + * @brief nvm_get_image - reads an entire image from nvram + * + * @param cdev + * @param type - type of the request nvram image + * @param buf - preallocated buffer to fill with the image + * @param len - length of the allocated buffer + * + * @return 0 on success, error otherwise + */ + int (*nvm_get_image)(struct qed_dev *cdev, + enum qed_nvm_images type, u8 *buf, u16 len); + +/** * @brief get_coalesce - Get coalesce parameters in usec * * @param cdev @@ -700,11 +739,13 @@ struct qed_common_ops { (((value) >> (name ## _SHIFT)) & name ## _MASK) /* Debug print definitions */ -#define DP_ERR(cdev, fmt, ...) \ - pr_err("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - DP_NAME(cdev) ? DP_NAME(cdev) : "", \ - ## __VA_ARGS__) \ +#define DP_ERR(cdev, fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } while (0) #define DP_NOTICE(cdev, fmt, ...) \ do { \ @@ -869,9 +910,15 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - int sb_cnt; - int sb_iov_cnt; - int sb_free_blk; + /* Original, current, and free SBs for PF */ + int orig; + int cnt; + int free_cnt; + + /* Original, current and free SBS for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 3414649133d2..111e606a74c8 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -210,6 +210,11 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struck that would be filled * we stats * @return 0 on success, error otherwise. + * @change_mac Change MAC of interface + * @param cdev + * @param handle - the connection handle. + * @param mac - new MAC to configure. + * @return 0 on success, otherwise error value. 
*/ struct qed_iscsi_ops { const struct qed_common_ops *common; @@ -248,6 +253,8 @@ struct qed_iscsi_ops { int (*get_stats)(struct qed_dev *cdev, struct qed_iscsi_stats *stats); + + int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac); }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void); diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 72c770f9f666..a9b3050f469c 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -42,7 +42,7 @@ #define RDMA_MAX_SGE_PER_SQ_WQE (4) #define RDMA_MAX_SGE_PER_RQ_WQE (4) -#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 866f063026de..fe6a33e45977 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -37,6 +37,8 @@ #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) #define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index a5e843268f0e..dbf7a43c3e1f 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -111,7 +111,6 @@ struct tcp_offload_params { __le32 snd_wnd; __le32 rcv_wnd; __le32 snd_wl1; - __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; @@ -122,7 +121,7 @@ struct tcp_offload_params { u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; - __le16 reserved2; + __le16 fw_internal; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; @@ -130,7 +129,7 @@ struct tcp_offload_params { u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 ts_ticks_per_second; + __le32 reserved3[2]; }; struct tcp_offload_params_opt2 { diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 57e54847b0b9..dea59c8eec54 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -18,7 +18,8 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned change, gfp_t flags); + unsigned change, u32 event, + gfp_t flags); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index c68307bc306f..707910c6c6c5 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -37,6 +37,7 @@ struct sockaddr_rxrpc { #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ #define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ +#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ /* * RxRPC control messages @@ -53,6 +54,7 @@ struct sockaddr_rxrpc { #define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ #define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ #define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ +#define RXRPC_UPGRADE_SERVICE 11 /* s-: Request service upgrade for client call */ /* * RxRPC security levels diff --git a/include/linux/serdev.h b/include/linux/serdev.h index cda76c6506ca..e69402d4a8ae 100644 --- 
a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *); void serdev_device_close(struct serdev_device *); unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); void serdev_device_set_flow_control(struct serdev_device *, bool); +int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); @@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev return 0; } static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} +static inline int serdev_device_write_buf(struct serdev_device *serdev, + const unsigned char *buf, + size_t count) +{ + return -ENODEV; +} static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} static inline int serdev_device_get_tiocm(struct serdev_device *serdev) { @@ -301,7 +308,7 @@ struct tty_driver; struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx); -void serdev_tty_port_unregister(struct tty_port *port); +int serdev_tty_port_unregister(struct tty_port *port); #else static inline struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, @@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port, { return ERR_PTR(-ENODEV); } -static inline void serdev_tty_port_unregister(struct tty_port *port) {} -#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ - -static inline int serdev_device_write_buf(struct serdev_device *serdev, - const unsigned char *data, - size_t count) +static inline int serdev_tty_port_unregister(struct tty_port *port) { - return serdev_device_write(serdev, data, count, 0); + return -ENODEV; } +#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ #endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index f4dfade428f0..35226cd4efb0 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a) return ptr_ring_consume(&a->ring); } +static inline int skb_array_consume_batched(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a) { return ptr_ring_consume_irq(&a->ring); } +static inline int skb_array_consume_batched_irq(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); +} + static inline struct sk_buff *skb_array_consume_any(struct skb_array *a) { return ptr_ring_consume_any(&a->ring); } +static inline int skb_array_consume_batched_any(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); +} + + static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a) { return ptr_ring_consume_bh(&a->ring); } +static inline int skb_array_consume_batched_bh(struct skb_array *a, + struct sk_buff **array, int n) +{ + return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); +} + static inline int __skb_array_len_with_tag(struct sk_buff *skb) { if (likely(skb)) { @@ -156,6 +181,12 @@ static void 
__skb_array_destroy_skb(void *ptr) kfree_skb(ptr); } +static inline void skb_array_unconsume(struct skb_array *a, + struct sk_buff **skbs, int n) +{ + ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); +} + static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) { return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d92056b2da44..0873c2f9a9c1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -109,6 +109,7 @@ * may perform further validation in this case. * GRE: only if the checksum is present in the header. * SCTP: indicates the CRC in SCTP header has been validated. + * FCOE: indicates the CRC in FC frame has been validated. * * skb->csum_level indicates the number of consecutive checksums found in * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. @@ -126,8 +127,10 @@ * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the * hardware doesn't need to parse L3/L4 headers to implement this. * - * Note: Even if device supports only some protocols, but is able to produce - * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * Notes: + * - Even if device supports only some protocols, but is able to produce + * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. + * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols. * * CHECKSUM_PARTIAL: * @@ -162,14 +165,11 @@ * * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate - * checksum offload capability. If a device has limited checksum capabilities - * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as - * described above) a helper function can be called to resolve - * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper - * function takes a spec argument that describes the protocol layer that is - * supported for checksum offload and can be called for each packet. If a - * packet does not match the specification for offload, skb_checksum_help - * is called to resolve the checksum. + * checksum offload capability. + * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based + * on network device checksumming capabilities: if a packet does not match + * them, skb_checksum_help or skb_crc32c_help (depending on the value of + * csum_not_inet, see item D.) is called to resolve the checksum. * * CHECKSUM_NONE: * @@ -189,11 +189,13 @@ * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note the there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports - * both IP checksum offload and SCTP CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers. + * will set set csum_start and csum_offset accordingly, set ip_summed to + * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in + * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. 
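skb_array.h above picks up typed wrappers for the batched consume helpers plus skb_array_unconsume(), which returns not-yet-processed skbs to the ring (destroying any that no longer fit). A sketch of the consume/unconsume pattern, roughly what a tun-style queue might do when asked to stop mid-burst; the stopping flag and the processing stand-in are invented for illustration:

#include <linux/kernel.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

static void example_rx_burst(struct skb_array *q, bool stopping)
{
	struct sk_buff *skbs[8];
	int n, i;

	n = skb_array_consume_batched(q, skbs, ARRAY_SIZE(skbs));
	if (stopping) {
		/* Put the whole unprocessed batch back instead of dropping it. */
		skb_array_unconsume(q, skbs, n);
		return;
	}
	for (i = 0; i < n; i++)
		dev_kfree_skb(skbs[i]);	/* stand-in for real packet processing */
}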
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload + * must verify which offload is configured for a packet by testing the + * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve + * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1. * * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack @@ -506,66 +508,6 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif -/** - * struct skb_mstamp - multi resolution time stamps - * @stamp_us: timestamp in us resolution - * @stamp_jiffies: timestamp in jiffies - */ -struct skb_mstamp { - union { - u64 v64; - struct { - u32 stamp_us; - u32 stamp_jiffies; - }; - }; -}; - -/** - * skb_mstamp_get - get current timestamp - * @cl: place to store timestamps - */ -static inline void skb_mstamp_get(struct skb_mstamp *cl) -{ - u64 val = local_clock(); - - do_div(val, NSEC_PER_USEC); - cl->stamp_us = (u32)val; - cl->stamp_jiffies = (u32)jiffies; -} - -/** - * skb_mstamp_delta - compute the difference in usec between two skb_mstamp - * @t1: pointer to newest sample - * @t0: pointer to oldest sample - */ -static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 delta_us = t1->stamp_us - t0->stamp_us; - u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; - - /* If delta_us is negative, this might be because interval is too big, - * or local_clock() drift is too big : fallback using jiffies. - */ - if (delta_us <= 0 || - delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) - - delta_us = jiffies_to_usecs(delta_jiffies); - - return delta_us; -} - -static inline bool skb_mstamp_after(const struct skb_mstamp *t1, - const struct skb_mstamp *t0) -{ - s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; - - if (!diff) - diff = t1->stamp_us - t0->stamp_us; - return diff > 0; -} - /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -616,6 +558,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking @@ -646,7 +589,7 @@ struct sk_buff { union { ktime_t tstamp; - struct skb_mstamp skb_mstamp; + u64 skb_mstamp; }; }; struct rb_node rbnode; /* used in netem & tcp stack */ @@ -744,7 +687,7 @@ struct sk_buff { __u8 csum_valid:1; __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_bad:1; + __u8 csum_not_inet:1; __u8 dst_pending_confirm:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE @@ -915,6 +858,15 @@ static inline bool skb_pkt_type_ok(u32 ptype) return ptype <= PACKET_OTHERHOST; } +static inline unsigned int skb_napi_id(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + return skb->napi_id; +#else + return 0; +#endif +} + void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_tx_error(struct sk_buff *skb); @@ -1001,10 +953,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); -int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); -int skb_to_sgvec(struct sk_buff *skb, 
struct scatterlist *sg, int offset, - int len); +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) consume_skb(a) @@ -3145,6 +3097,8 @@ struct skb_checksum_ops { __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); }; +extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; + __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, @@ -3314,13 +3268,6 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); -static inline void sw_tx_timestamp(struct sk_buff *skb) -{ - if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && - !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - skb_tstamp_tx(skb, NULL); -} - /** * skb_tx_timestamp() - Driver hook for transmit timestamping * @@ -3336,7 +3283,8 @@ static inline void sw_tx_timestamp(struct sk_buff *skb) static inline void skb_tx_timestamp(struct sk_buff *skb) { skb_clone_tx_timestamp(skb); - sw_tx_timestamp(skb); + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + skb_tstamp_tx(skb, NULL); } /** @@ -3402,21 +3350,6 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) } } -static inline void __skb_mark_checksum_bad(struct sk_buff *skb) -{ - /* Mark current checksum as bad (typically called from GRO - * path). In the case that ip_summed is CHECKSUM_NONE - * this must be the first checksum encountered in the packet. - * When ip_summed is CHECKSUM_UNNECESSARY, this is the first - * checksum after the last one validated. For UDP, a zero - * checksum can not be marked as bad. - */ - - if (skb->ip_summed == CHECKSUM_NONE || - skb->ip_summed == CHECKSUM_UNNECESSARY) - skb->csum_bad = 1; -} - /* Check if we need to perform checksum complete validation. 
* * Returns true if checksum complete is needed, false otherwise @@ -3470,9 +3403,6 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } - } else if (skb->csum_bad) { - /* ip_summed == CHECKSUM_NONE in this case */ - return (__force __sum16)1; } skb->csum = psum; @@ -3532,8 +3462,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) static inline bool __skb_checksum_convert_check(struct sk_buff *skb) { - return (skb->ip_summed == CHECKSUM_NONE && - skb->csum_valid && !skb->csum_bad); + return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); } static inline void __skb_checksum_convert(struct sk_buff *skb, diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h index a18e0783946b..787e7ad53d45 100644 --- a/include/linux/soc/renesas/rcar-rst.h +++ b/include/linux/soc/renesas/rcar-rst.h @@ -1,6 +1,11 @@ #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ #define __LINUX_SOC_RENESAS_RCAR_RST_H__ +#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \ + defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796) int rcar_rst_read_mode_pins(u32 *mode); +#else +static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } +#endif #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 3921cb9dfadb..108739ff9223 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -177,6 +177,7 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); + struct mac_device_info *(*setup)(void *priv); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; @@ -185,6 +186,7 @@ struct plat_stmmacenet_data { struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; + bool has_sun8i; bool tso_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 94631026f79c..11cef5a7bc87 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) { char *cp = (char *)p; struct kvec *vec = &rqstp->rq_arg.head[0]; - return cp == (char *)vec->iov_base + vec->iov_len; + return cp >= (char*)vec->iov_base + && cp <= (char*)vec->iov_base + vec->iov_len; } static inline int diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 22854f028434..542ca1ae02c4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -123,7 +123,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct skb_mstamp snt_synack; /* first SYNACK sent time */ + u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; u32 txhash; u32 rcv_isn; @@ -211,7 +211,7 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { - struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u64 mstamp; /* (Re)sent time of the skb */ u32 rtt_us; /* Associated RTT */ u32 end_seq; /* Ending TCP sequence of the skb */ u8 advanced; /* mstamp advanced since last lost marking */ @@ -240,7 +240,7 @@ struct tcp_sock { u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
*/ /* RTT measurement */ - struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */ + u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ @@ -280,8 +280,8 @@ struct tcp_sock { u32 delivered; /* Total data packets delivered incl. rexmits */ u32 lost; /* Total data packets lost incl. rexmits */ u32 app_limited; /* limited until "delivered" reaches this val */ - struct skb_mstamp first_tx_mstamp; /* start of window send phase */ - struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */ + u64 first_tx_mstamp; /* start of window send phase */ + u64 delivered_mstamp; /* time we reached "delivered" */ u32 rate_delivered; /* saved rate sample: packets delivered */ u32 rate_interval_us; /* saved rate sample: time elapsed */ @@ -335,16 +335,16 @@ struct tcp_sock { /* Receiver side RTT estimation */ struct { - u32 rtt_us; - u32 seq; - struct skb_mstamp time; + u32 rtt_us; + u32 seq; + u64 time; } rcv_rtt_est; /* Receiver queue space */ struct { - int space; - u32 seq; - struct skb_mstamp time; + int space; + u32 seq; + u64 time; } rcvq_space; /* TCP-specific MTU probe information. */ diff --git a/include/linux/tty.h b/include/linux/tty.h index d07cd2105a6c..eccb4ec30a8a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); +extern struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +extern void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); extern int tty_port_alloc_xmit_buf(struct tty_port *port); extern void tty_port_free_xmit_buf(struct tty_port *port); extern void tty_port_destroy(struct tty_port *port); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a469999a106d..50398b69ca44 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -148,6 +148,7 @@ struct usb_hcd { unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ + unsigned msi_enabled:1; /* driver has MSI enabled? 
*/ unsigned remove_phy:1; /* auto-remove USB phy */ /* The next flag is a stopgap, to be removed when all the HCDs diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 7dffa5624ea6..97116379db5f 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -206,6 +206,7 @@ struct cdc_state { }; extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); +extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_status(struct usbnet *, struct urb *); diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index eb50ce54b759..413335c8cb52 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -29,7 +29,7 @@ struct edid; struct cec_adapter; struct cec_notifier; -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) /** * cec_notifier_get - find or create a new cec_notifier for the given device. diff --git a/include/media/cec.h b/include/media/cec.h index b8eb895731d5..bfa88d4d67e1 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -173,7 +173,7 @@ struct cec_adapter { bool passthrough; struct cec_log_addrs log_addrs; -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER struct cec_notifier *notifier; #endif @@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input); */ int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); -#ifdef CONFIG_MEDIA_CEC_NOTIFIER +#ifdef CONFIG_CEC_NOTIFIER void cec_register_cec_notifier(struct cec_adapter *adap, struct cec_notifier *notifier); #endif diff --git a/include/net/act_api.h b/include/net/act_api.h index cfa2ae33da9a..26ffd8333f50 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -42,6 +42,7 @@ struct tc_action { struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; struct tc_cookie *act_cookie; + struct tcf_chain *goto_chain; }; #define tcf_head common.tcfa_head #define tcf_index common.tcfa_index @@ -180,12 +181,12 @@ int tcf_unregister_action(struct tc_action_ops *a, int tcf_action_destroy(struct list_head *actions, int bind); int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); -int tcf_action_init(struct net *net, struct nlattr *nla, - struct nlattr *est, char *n, int ovr, - int bind, struct list_head *); -struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, - struct nlattr *est, char *n, int ovr, - int bind); +int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, + struct nlattr *est, char *name, int ovr, int bind, + struct list_head *actions); +struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, + struct nlattr *nla, struct nlattr *est, + char *name, int ovr, int bind); int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 99aa5e5e3100..fe98f0a5bef0 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -399,6 +399,7 @@ enum { #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_EXT_SCAN_POLICY 0x80 +#define 
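The cec-notifier hunks above tighten the compile-time guard to IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) so callers keep building when the notifier is configured out. A simplified sketch of the underlying header pattern; CONFIG_FOO_NOTIFIER and foo_notifier_set_phys_addr() are invented for illustration:

#include <stdio.h>

/* Simplified illustration of the guarded-header pattern: when the feature is
 * configured out, callers still compile against inline no-op stubs instead of
 * sprinkling #ifdefs at every call site.  With the symbol defined, the real
 * implementation (elsewhere) would be linked in instead. */
#ifdef CONFIG_FOO_NOTIFIER
void foo_notifier_set_phys_addr(int pa);
#else
static inline void foo_notifier_set_phys_addr(int pa) { (void)pa; }
#endif

int main(void)
{
    /* Compiles and runs whether or not CONFIG_FOO_NOTIFIER is defined. */
    foo_notifier_set_phys_addr(0x1000);
    printf("notifier call compiled either way\n");
    return 0;
}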
HCI_LE_CHAN_SEL_ALG2 0x40 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -1498,6 +1499,13 @@ struct hci_rp_le_read_max_data_len { __le16 rx_time; } __packed; +#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031 +struct hci_cp_le_set_default_phy { + __u8 all_phys; + __u8 tx_phys; + __u8 rx_phys; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 1797235cd590..d79d28f5318c 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -104,6 +104,8 @@ struct bond_option { int __bond_opt_set(struct bonding *bond, unsigned int option, struct bond_opt_value *val); +int __bond_opt_set_notify(struct bonding *bond, unsigned int option, + struct bond_opt_value *val); int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, diff --git a/include/net/dsa.h b/include/net/dsa.h index 8e24677b1c62..2effb0af9d7c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/ethtool.h> #include <net/devlink.h> +#include <net/switchdev.h> struct tc_action; struct phy_device; @@ -27,13 +28,14 @@ struct fixed_phy_status; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_BRCM, DSA_TAG_PROTO_DSA, - DSA_TAG_PROTO_TRAILER, DSA_TAG_PROTO_EDSA, - DSA_TAG_PROTO_BRCM, - DSA_TAG_PROTO_QCA, - DSA_TAG_PROTO_MTK, + DSA_TAG_PROTO_KSZ, DSA_TAG_PROTO_LAN9303, + DSA_TAG_PROTO_MTK, + DSA_TAG_PROTO_QCA, + DSA_TAG_PROTO_TRAILER, DSA_TAG_LAST, /* MUST BE LAST */ }; @@ -125,6 +127,8 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; + + /* Copy of tag_ops->rcv for faster access in hot path */ struct sk_buff * (*rcv)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, @@ -137,10 +141,9 @@ struct dsa_switch_tree { const struct ethtool_ops *master_orig_ethtool_ops; /* - * The switch and port to which the CPU is attached. + * The switch port to which the CPU is attached. */ - struct dsa_switch *cpu_switch; - s8 cpu_port; + struct dsa_port *cpu_dp; /* * Data for the individual switch chips. @@ -251,7 +254,7 @@ struct dsa_switch { static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { - return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port); + return !!(ds->cpu_port_mask & (1 << p)); } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) @@ -279,28 +282,12 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) * Else return the (DSA) port number that connects to the * switch that is one hop closer to the cpu. */ - if (dst->cpu_switch == ds) - return dst->cpu_port; + if (dst->cpu_dp->ds == ds) + return dst->cpu_dp->index; else - return ds->rtable[dst->cpu_switch->index]; + return ds->rtable[dst->cpu_dp->ds->index]; } -struct switchdev_trans; -struct switchdev_obj; -struct switchdev_obj_port_fdb; -struct switchdev_obj_port_mdb; -struct switchdev_obj_port_vlan; - -#define DSA_NOTIFIER_BRIDGE_JOIN 1 -#define DSA_NOTIFIER_BRIDGE_LEAVE 2 - -/* DSA_NOTIFIER_BRIDGE_* */ -struct dsa_notifier_bridge_info { - struct net_device *br; - int sw_index; - int port; -}; - struct dsa_switch_ops { /* * Legacy probing. 
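With the dsa changes above, dsa_is_cpu_port() is answered from a per-switch cpu_port_mask rather than by comparing against a single tree-wide cpu_switch/cpu_port pair. A small stand-alone version of that bitmask membership test; the mask value is made up for the example:

#include <stdbool.h>
#include <stdio.h>

/* Port membership as a bitmask, as in the reworked dsa_is_cpu_port():
 * bit p set in cpu_port_mask means port p faces the CPU. */
static bool is_cpu_port(unsigned int cpu_port_mask, int p)
{
    return cpu_port_mask & (1u << p);
}

int main(void)
{
    unsigned int cpu_port_mask = 1u << 5;   /* pretend port 5 is the CPU port */

    for (int p = 0; p < 8; p++)
        printf("port %d: %s\n", p, is_cpu_port(cpu_port_mask, p) ? "cpu" : "user");
    return 0;
}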
@@ -410,7 +397,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_vlan *vlan); int (*port_vlan_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * Forwarding database @@ -425,7 +412,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_fdb *fdb); int (*port_fdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * Multicast database @@ -440,7 +427,7 @@ struct dsa_switch_ops { const struct switchdev_obj_port_mdb *mdb); int (*port_mdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_mdb *mdb, - int (*cb)(struct switchdev_obj *obj)); + switchdev_obj_dump_cb_t *cb); /* * RXNFC @@ -480,23 +467,18 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); struct net_device *dsa_dev_to_net_device(struct device *dev); -static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) -{ - return dst->rcv != NULL; -} - +/* Keep inline for faster access in hot path */ static inline bool netdev_uses_dsa(struct net_device *dev) { #if IS_ENABLED(CONFIG_NET_DSA) - if (dev->dsa_ptr != NULL) - return dsa_uses_tagged_protocol(dev->dsa_ptr); + return dev->dsa_ptr && dev->dsa_ptr->rcv; #endif return false; } struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); void dsa_unregister_switch(struct dsa_switch *ds); -int dsa_register_switch(struct dsa_switch *ds, struct device *dev); +int dsa_register_switch(struct dsa_switch *ds); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); diff --git a/include/net/dst.h b/include/net/dst.h index 049af33da3b6..1969008783d8 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -31,9 +31,9 @@ struct sk_buff; struct dst_entry { + struct net_device *dev; struct rcu_head rcu_head; struct dst_entry *child; - struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; @@ -107,10 +107,16 @@ struct dst_entry { }; }; +struct dst_metrics { + u32 metrics[RTAX_MAX]; + atomic_t refcnt; +}; +extern const struct dst_metrics dst_default_metrics; + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL +#define DST_METRICS_REFCOUNTED 0x2UL #define DST_METRICS_FLAGS 0x3UL #define __DST_METRICS_PTR(Y) \ ((u32 *)((Y) & ~DST_METRICS_FLAGS)) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8d21d448daa9..e2663e900b0a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -157,6 +157,24 @@ struct flow_dissector_key_eth_addrs { unsigned char src[ETH_ALEN]; }; +/** + * struct flow_dissector_key_tcp: + * @flags: flags + */ +struct flow_dissector_key_tcp { + __be16 flags; +}; + +/** + * struct flow_dissector_key_ip: + * @tos: tos + * @ttl: ttl + */ +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -177,6 +195,8 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ + FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ + FLOW_DISSECTOR_KEY_IP, 
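struct dst_metrics above pairs the metrics array with a reference count so several routes can share one block, with DST_METRICS_REFCOUNTED marking such pointers. A user-space sketch of the share-and-put idea, assuming C11 atomics; the struct and helper names are invented, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define N_METRICS 16   /* illustrative size only */

/* Shared, reference-counted metrics block: several users point at one block
 * and the last put frees it. */
struct metrics {
    unsigned int values[N_METRICS];
    atomic_int refcnt;
};

static struct metrics *metrics_get(struct metrics *m)
{
    atomic_fetch_add(&m->refcnt, 1);
    return m;
}

static void metrics_put(struct metrics *m)
{
    if (atomic_fetch_sub(&m->refcnt, 1) == 1)
        free(m);
}

int main(void)
{
    struct metrics *m = calloc(1, sizeof(*m));

    atomic_init(&m->refcnt, 1);
    struct metrics *other_user = metrics_get(m);

    metrics_put(other_user);   /* still referenced by m */
    metrics_put(m);            /* last put frees the block */
    printf("refcounted metrics example done\n");
    return 0;
}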
/* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 68b88192b00c..c59a098221db 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -128,7 +128,6 @@ static inline int genl_err_attr(struct genl_info *info, int err, * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps - * @ops_list: operations list */ struct genl_ops { const struct nla_policy *policy; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 5894730ec82a..975779d0e7b0 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -92,7 +92,7 @@ struct inet_frags { */ u32 rnd; seqlock_t rnd_seqlock; - int qsize; + unsigned int qsize; unsigned int (*hashfn)(const struct inet_frag_queue *); bool (*match)(const struct inet_frag_queue *q, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c979c878df1c..aa50e2e6fa2a 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -277,7 +277,8 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), void *arg); int fib6_add(struct fib6_node *root, struct rt6_info *rt, - struct nl_info *info, struct mx6_config *mxc); + struct nl_info *info, struct mx6_config *mxc, + struct netlink_ext_ack *extack); int fib6_del(struct rt6_info *rt, struct nl_info *info); void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f5e625f53367..f3da9dd2a8db 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -90,7 +90,7 @@ void ip6_route_cleanup(void); int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); -int ip6_route_add(struct fib6_config *cfg); +int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack); int ip6_ins_rt(struct rt6_info *); int ip6_del_rt(struct rt6_info *); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6692c5758b33..3dbfd5e6a347 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -114,11 +114,11 @@ struct fib_info { __be32 fib_prefsrc; u32 fib_tb_id; u32 fib_priority; - u32 *fib_metrics; -#define fib_mtu fib_metrics[RTAX_MTU-1] -#define fib_window fib_metrics[RTAX_WINDOW-1] -#define fib_rtt fib_metrics[RTAX_RTT-1] -#define fib_advmss fib_metrics[RTAX_ADVMSS-1] + struct dst_metrics *fib_metrics; +#define fib_mtu fib_metrics->metrics[RTAX_MTU-1] +#define fib_window fib_metrics->metrics[RTAX_WINDOW-1] +#define fib_rtt fib_metrics->metrics[RTAX_RTT-1] +#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] int fib_nhs; #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_weight; @@ -136,6 +136,7 @@ struct fib_rule; struct fib_table; struct fib_result { + __be32 prefix; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; @@ -263,8 +264,10 @@ struct fib_table { int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags); -int fib_table_insert(struct net *, struct fib_table *, struct fib_config *); -int fib_table_delete(struct net *, struct fib_table *, struct fib_config *); +int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, + struct netlink_ext_ack *extack); +int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, + struct netlink_ext_ack *extack); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct net *net, struct fib_table *table); diff --git 
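Several hunks above (fib6_add, ip6_route_add, fib_table_insert/delete) gain a struct netlink_ext_ack argument so validation failures can hand a human-readable message back to user space. A simplified stand-alone sketch of that propagation pattern; struct ext_ack, set_err_msg() and add_route() are invented stand-ins, not kernel symbols:

#include <stdio.h>

/* Simplified analogue of netlink extended acks: an optional slot for an
 * error string that lower layers can fill in on failure. */
struct ext_ack {
    const char *msg;
};

static void set_err_msg(struct ext_ack *ack, const char *msg)
{
    if (ack)
        ack->msg = msg;
}

static int add_route(int prefix_len, struct ext_ack *ack)
{
    if (prefix_len > 32) {
        set_err_msg(ack, "Invalid prefix length");
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ext_ack ack = { 0 };

    if (add_route(40, &ack) < 0)
        printf("request rejected: %s\n", ack.msg ? ack.msg : "unknown error");
    return 0;
}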
a/include/net/ipv6.h b/include/net/ipv6.h index dbf0abba33b8..3e505bbff8ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, */ extern const struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; +extern const struct proto_ops inet6_sockraw_ops; struct group_source_req; struct group_filter; diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index ebfe237aad7e..7c26863b8cf4 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -35,7 +35,8 @@ struct lwtunnel_state { struct lwtunnel_encap_ops { int (*build_state)(struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **ts); + struct lwtunnel_state **ts, + struct netlink_ext_ack *extack); void (*destroy_state)(struct lwtunnel_state *lws); int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*input)(struct sk_buff *skb); @@ -107,12 +108,15 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); -int lwtunnel_valid_encap_type(u16 encap_type); -int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len); +int lwtunnel_valid_encap_type(u16 encap_type, + struct netlink_ext_ack *extack); +int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, + struct netlink_ext_ack *extack); int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **lws); + struct lwtunnel_state **lws, + struct netlink_ext_ack *extack); int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate); int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate); @@ -172,11 +176,14 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, return -EOPNOTSUPP; } -static inline int lwtunnel_valid_encap_type(u16 encap_type) +static inline int lwtunnel_valid_encap_type(u16 encap_type, + struct netlink_ext_ack *extack) { + NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel"); return -EOPNOTSUPP; } -static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) +static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, + struct netlink_ext_ack *extack) { /* return 0 since we are not walking attr looking for * RTA_ENCAP_TYPE attribute on nexthops. 
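The lwtunnel changes above extend the encap ops table (and its compiled-out stubs) with extack reporting; the registration model itself is a per-type slot holding a const ops structure. A reduced sketch of that ops-table pattern with invented names:

#include <stddef.h>
#include <stdio.h>

/* One slot per encap type, each holding a const ops table.  Names and sizes
 * are illustrative, not the kernel's. */
struct encap_ops {
    const char *name;
    int (*build_state)(int family);
};

#define ENCAP_MAX 8
static const struct encap_ops *encap_ops_tbl[ENCAP_MAX];

static int encap_add_ops(const struct encap_ops *ops, unsigned int type)
{
    if (type >= ENCAP_MAX || encap_ops_tbl[type])
        return -1;              /* slot out of range or already taken */
    encap_ops_tbl[type] = ops;
    return 0;
}

static int demo_build_state(int family)
{
    printf("building state for family %d\n", family);
    return 0;
}

static const struct encap_ops demo_ops = { "demo", demo_build_state };

int main(void)
{
    encap_add_ops(&demo_ops, 1);
    if (encap_ops_tbl[1])
        encap_ops_tbl[1]->build_state(2);
    return 0;
}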
@@ -187,7 +194,8 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) static inline int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, - struct lwtunnel_state **lws) + struct lwtunnel_state **lws, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } diff --git a/include/net/neighbour.h b/include/net/neighbour.h index e4dd3a214034..639b67564a7d 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -317,6 +317,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid); void __neigh_set_probe_once(struct neighbour *neigh); +bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index e04fa7691e5d..c519bb5b5bb8 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -9,6 +9,7 @@ #ifndef _NF_CONNTRACK_HELPER_H #define _NF_CONNTRACK_HELPER_H +#include <linux/refcount.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_expect.h> @@ -26,6 +27,7 @@ struct nf_conntrack_helper { struct hlist_node hnode; /* Internal use. */ char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ + refcount_t refcnt; struct module *me; /* pointer to self */ const struct nf_conntrack_expect_policy *expect_policy; @@ -79,6 +81,8 @@ struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum); +void nf_conntrack_helper_put(struct nf_conntrack_helper *helper); + void nf_ct_helper_init(struct nf_conntrack_helper *helper, u16 l3num, u16 protonum, const char *name, u16 default_port, u16 spec_port, u32 id, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 028faec8fc27..8a8bab8d7b15 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -176,7 +176,7 @@ struct nft_data_desc { int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, unsigned int size, struct nft_data_desc *desc, const struct nlattr *nla); -void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); +void nft_data_release(const struct nft_data *data, enum nft_data_types type); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, enum nft_data_types type, unsigned int len); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 269fd78bb0ae..537d0a0ad4c4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -18,11 +18,32 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); #ifdef CONFIG_NET_CLS -void tcf_destroy_chain(struct tcf_proto __rcu **fl); +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create); +void tcf_chain_put(struct tcf_chain *chain); +int tcf_block_get(struct tcf_block **p_block, + struct tcf_proto __rcu **p_filter_chain); +void tcf_block_put(struct tcf_block *block); +int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 
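neigh_remove_one(), declared above, removes a single neighbour entry from its table. A simplified user-space sketch of unlinking one keyed entry from a bucket's singly linked list; the types here are invented stand-ins, and the kernel version additionally deals with RCU and reference counts:

#include <stdio.h>
#include <stdlib.h>

struct entry {
    int key;
    struct entry *next;
};

/* Unlink and free the first entry matching @key; return 1 if found. */
static int remove_one(struct entry **bucket, int key)
{
    for (struct entry **pp = bucket; *pp; pp = &(*pp)->next) {
        if ((*pp)->key == key) {
            struct entry *victim = *pp;

            *pp = victim->next;   /* unlink */
            free(victim);
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    struct entry *bucket = NULL;

    for (int k = 1; k <= 3; k++) {
        struct entry *e = malloc(sizeof(*e));
        e->key = k;
        e->next = bucket;
        bucket = e;
    }
    printf("removed key 2: %d\n", remove_one(&bucket, 2));
    printf("removed key 9: %d\n", remove_one(&bucket, 9));
    return 0;
}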
+ struct tcf_result *res, bool compat_mode); + #else -static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl) +static inline +int tcf_block_get(struct tcf_block **p_block, + struct tcf_proto __rcu **p_filter_chain) +{ + return 0; +} + +static inline void tcf_block_put(struct tcf_block *block) { } + +static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode) +{ + return TC_ACT_UNSPEC; +} #endif static inline unsigned long @@ -136,6 +157,25 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts, #endif } +static inline void +tcf_exts_stats_update(const struct tcf_exts *exts, + u64 bytes, u64 packets, u64 lastuse) +{ +#ifdef CONFIG_NET_CLS_ACT + int i; + + preempt_disable(); + + for (i = 0; i < exts->nr_actions; i++) { + struct tc_action *a = exts->actions[i]; + + tcf_action_stats_update(a, bytes, packets, lastuse); + } + + preempt_enable(); +#endif +} + /** * tcf_exts_exec - execute tc filter extensions * @skb: socket buffer diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index bec46f63f10c..2579c209ea51 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -113,9 +113,6 @@ static inline void qdisc_run(struct Qdisc *q) __qdisc_run(q); } -int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res, bool compat_mode); - static inline __be16 tc_skb_protocol(const struct sk_buff *skb) { /* We need to take extra care in case the skb came via diff --git a/include/net/request_sock.h b/include/net/request_sock.h index a12a5d25b27e..53ced67c4ae9 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -29,7 +29,7 @@ struct proto; struct request_sock_ops { int family; - int obj_size; + unsigned int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *sk, diff --git a/include/net/route.h b/include/net/route.h index 2cc0e14c6359..08e689f23365 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -113,13 +113,16 @@ struct in_device; int ip_rt_init(void); void rt_cache_flush(struct net *net); void rt_flush_dev(struct net_device *dev); -struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp, - const struct sk_buff *skb); +struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp, + const struct sk_buff *skb); +struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp, + struct fib_result *res, + const struct sk_buff *skb); static inline struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp) { - return __ip_route_output_key_hash(net, flp, NULL); + return ip_route_output_key_hash(net, flp, NULL); } struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, @@ -175,6 +178,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin); +int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, + u8 tos, struct net_device *devin, + struct fib_result *res); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 22e52093bfda..368850194c94 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -8,6 +8,7 @@ #include <linux/pkt_cls.h> #include <linux/percpu.h> #include <linux/dynamic_queue_limits.h> +#include <linux/list.h> #include 
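tcf_exts_stats_update(), added above, pushes one set of hardware counters into every action attached to a filter while preemption is disabled. A stand-alone sketch of the same accumulation loop, with simplified types and no preemption handling:

#include <stdint.h>
#include <stdio.h>

struct action_stats {
    uint64_t bytes;
    uint64_t packets;
    uint64_t lastuse;
};

/* Add the same byte/packet counts to every attached action and record when
 * the filter was last used. */
static void exts_stats_update(struct action_stats *actions, int nr_actions,
                              uint64_t bytes, uint64_t packets, uint64_t lastuse)
{
    for (int i = 0; i < nr_actions; i++) {
        actions[i].bytes += bytes;
        actions[i].packets += packets;
        actions[i].lastuse = lastuse;
    }
}

int main(void)
{
    struct action_stats acts[2] = { 0 };

    exts_stats_update(acts, 2, 1500, 1, 12345);
    printf("action 0: %llu bytes, %llu packets\n",
           (unsigned long long)acts[0].bytes,
           (unsigned long long)acts[0].packets);
    return 0;
}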
<net/gen_stats.h> #include <net/rtnetlink.h> @@ -153,7 +154,7 @@ struct Qdisc_class_ops { void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); + struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 classid); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); @@ -192,8 +193,13 @@ struct Qdisc_ops { struct tcf_result { - unsigned long class; - u32 classid; + union { + struct { + unsigned long class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + }; }; struct tcf_proto_ops { @@ -236,6 +242,7 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; + struct tcf_chain *chain; struct rcu_head rcu; }; @@ -247,6 +254,19 @@ struct qdisc_skb_cb { unsigned char data[QDISC_CB_PRIV_LEN]; }; +struct tcf_chain { + struct tcf_proto __rcu *filter_chain; + struct tcf_proto __rcu **p_filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; /* chain index */ + unsigned int refcnt; +}; + +struct tcf_block { + struct list_head chain_list; +}; + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a8b38e123f97..5051317162df 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -377,10 +377,11 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } sctp_sender_hb_info_t; -int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); -int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); +int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, + gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); +void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new); /* What is the current SSN number for this stream? */ #define sctp_ssn_peek(stream, type, sid) \ @@ -1750,7 +1751,7 @@ struct sctp_association { __u32 default_rcv_context; /* Stream arrays */ - struct sctp_stream *stream; + struct sctp_stream stream; /* All outbound chunks go through this structure. 
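The new tcf_block/tcf_chain structures above give each block a list of chains keyed by index, with tcf_chain_get() able to create a chain on demand and take a reference. A reduced sketch of that lookup-or-create pattern, using invented types and no locking:

#include <stdio.h>
#include <stdlib.h>

struct chain {
    unsigned int index;
    unsigned int refcnt;
    struct chain *next;
};

struct block {
    struct chain *chains;
};

/* Find the chain with @index; optionally create it if missing.  Each
 * successful call takes a reference. */
static struct chain *chain_get(struct block *b, unsigned int index, int create)
{
    for (struct chain *c = b->chains; c; c = c->next)
        if (c->index == index) {
            c->refcnt++;
            return c;
        }
    if (!create)
        return NULL;

    struct chain *c = calloc(1, sizeof(*c));
    c->index = index;
    c->refcnt = 1;
    c->next = b->chains;
    b->chains = c;
    return c;
}

int main(void)
{
    struct block b = { 0 };
    struct chain *c0 = chain_get(&b, 0, 1);     /* created */
    struct chain *again = chain_get(&b, 0, 0);  /* found, refcount bumped */

    printf("chain 0 refcnt = %u (%s)\n", c0->refcnt,
           c0 == again ? "same object" : "bug");
    return 0;
}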
*/ struct sctp_outq outqueue; diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index f31fb6331a53..3248beaf16b0 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <net/act_api.h> +#include <linux/tc_act/tc_csum.h> struct tcf_csum { struct tc_action common; @@ -11,4 +12,18 @@ struct tcf_csum { }; #define to_tcf_csum(a) ((struct tcf_csum *)a) +static inline bool is_tcf_csum(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_CSUM) + return true; +#endif + return false; +} + +static inline u32 tcf_csum_update_flags(const struct tc_action *a) +{ + return to_tcf_csum(a)->update_flags; +} + #endif /* __NET_TC_CSUM_H */ diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index b6f173910226..d576374c4d6f 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -15,7 +15,7 @@ struct tcf_gact { }; #define to_gact(a) ((struct tcf_gact *)a) -static inline bool is_tcf_gact_shot(const struct tc_action *a) +static inline bool __is_tcf_gact_act(const struct tc_action *a, int act) { #ifdef CONFIG_NET_CLS_ACT struct tcf_gact *gact; @@ -24,10 +24,21 @@ static inline bool is_tcf_gact_shot(const struct tc_action *a) return false; gact = to_gact(a); - if (gact->tcf_action == TC_ACT_SHOT) + if (gact->tcf_action == act) return true; #endif return false; } + +static inline bool is_tcf_gact_shot(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_SHOT); +} + +static inline bool is_tcf_gact_trap(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_TRAP); +} + #endif /* __NET_TC_GACT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index b4dc93dae98c..28b577a35786 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void) u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss); -__u32 cookie_init_timestamp(struct request_sock *req); +u64 cookie_init_timestamp(struct request_sock *req); bool cookie_timestamp_decode(struct tcp_options_received *opt); bool cookie_ecn_ok(const struct tcp_options_received *opt, const struct net *net, const struct dst_entry *dst); @@ -700,17 +700,61 @@ u32 __tcp_select_window(struct sock *sk); void tcp_send_window_probe(struct sock *sk); -/* TCP timestamps are only 32-bits, this causes a slight - * complication on 64-bit systems since we store a snapshot - * of jiffies in the buffer control blocks below. We decided - * to use only the low 32-bits of jiffies and hide the ugly - * casts with the following macro. +/* TCP uses 32bit jiffies to save some space. + * Note that this is different from tcp_time_stamp, which + * historically has been the same until linux-4.13. */ -#define tcp_time_stamp ((__u32)(jiffies)) +#define tcp_jiffies32 ((u32)jiffies) + +/* + * Deliver a 32bit value for TCP timestamp option (RFC 7323) + * It is no longer tied to jiffies, but to 1 ms clock. + * Note: double check if you want to use tcp_jiffies32 instead of this. 
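The tc_gact hunk above folds is_tcf_gact_shot() into a generic __is_tcf_gact_act() helper and adds an is_tcf_gact_trap() wrapper. A simplified stand-alone version of that refactor; the structure is reduced to the one field the check needs, and the TC_ACT_* values follow the uapi header:

#include <stdbool.h>
#include <stdio.h>

#define TC_ACT_SHOT 2
#define TC_ACT_TRAP 8   /* value as added elsewhere in this merge */

struct gact {
    int tcf_action;
};

/* Generic predicate parameterized by the action code... */
static bool __is_gact_act(const struct gact *g, int act)
{
    return g && g->tcf_action == act;
}

/* ...with thin, readable wrappers on top. */
static bool is_gact_shot(const struct gact *g) { return __is_gact_act(g, TC_ACT_SHOT); }
static bool is_gact_trap(const struct gact *g) { return __is_gact_act(g, TC_ACT_TRAP); }

int main(void)
{
    struct gact g = { .tcf_action = TC_ACT_TRAP };

    printf("shot: %d, trap: %d\n", is_gact_shot(&g), is_gact_trap(&g));
    return 0;
}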
+ */ +#define TCP_TS_HZ 1000 + +static inline u64 tcp_clock_ns(void) +{ + return local_clock(); +} + +static inline u64 tcp_clock_us(void) +{ + return div_u64(tcp_clock_ns(), NSEC_PER_USEC); +} + +/* This should only be used in contexts where tp->tcp_mstamp is up to date */ +static inline u32 tcp_time_stamp(const struct tcp_sock *tp) +{ + return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ); +} + +/* Could use tcp_clock_us() / 1000, but this version uses a single divide */ +static inline u32 tcp_time_stamp_raw(void) +{ + return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ); +} + + +/* Refresh 1us clock of a TCP socket, + * ensuring monotically increasing values. + */ +static inline void tcp_mstamp_refresh(struct tcp_sock *tp) +{ + u64 val = tcp_clock_us(); + + if (val > tp->tcp_mstamp) + tp->tcp_mstamp = val; +} + +static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) +{ + return max_t(s64, t1 - t0, 0); +} static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) { - return skb->skb_mstamp.stamp_jiffies; + return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); } @@ -775,9 +819,9 @@ struct tcp_skb_cb { /* pkts S/ACKed so far upon tx of skb, incl retrans: */ __u32 delivered; /* start of send pipeline phase */ - struct skb_mstamp first_tx_mstamp; + u64 first_tx_mstamp; /* when we reached the "delivered" count */ - struct skb_mstamp delivered_mstamp; + u64 delivered_mstamp; } tx; /* only used for outgoing skbs */ union { struct inet_skb_parm h4; @@ -893,7 +937,7 @@ struct ack_sample { * A sample is invalid if "delivered" or "interval_us" is negative. */ struct rate_sample { - struct skb_mstamp prior_mstamp; /* starting timestamp for interval */ + u64 prior_mstamp; /* starting timestamp for interval */ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ long interval_us; /* time for tp->delivered to incr "delivered" */ @@ -925,7 +969,7 @@ struct tcp_congestion_ops { void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); /* call when ack arrives (optional) */ void (*in_ack_event)(struct sock *sk, u32 flags); - /* new value of cwnd after loss (optional) */ + /* new value of cwnd after loss (required) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); @@ -1242,7 +1286,7 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) if (!sysctl_tcp_slow_start_after_idle || tp->packets_out || ca_ops->cong_control) return; - delta = tcp_time_stamp - tp->lsndtime; + delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) tcp_cwnd_restart(sk, delta); } @@ -1304,8 +1348,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) { const struct inet_connection_sock *icsk = &tp->inet_conn; - return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime, - tcp_time_stamp - tp->rcv_tstamp); + return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime, + tcp_jiffies32 - tp->rcv_tstamp); } static inline int tcp_fin_time(const struct sock *sk) @@ -1859,7 +1903,7 @@ void tcp_init(void); /* tcp_recovery.c */ extern void tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, - const struct skb_mstamp *xmit_time); + u64 xmit_time); extern void tcp_rack_reo_timeout(struct sock *sk); /* diff --git a/include/net/x25.h b/include/net/x25.h index c383aa4edbf0..6d30a01d281d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -298,10 
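The tcp.h block above replaces jiffies-based stamps with a 64-bit microsecond clock: tcp_mstamp_refresh() only ever moves the cached stamp forward and tcp_stamp_us_delta() clamps at zero. A user-space analogue of those helpers, assuming clock_gettime(CLOCK_MONOTONIC) in place of local_clock(); the function names here are invented:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t clock_us(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000u + ts.tv_nsec / 1000u;
}

/* Refresh the cached stamp, never letting it move backwards. */
static void mstamp_refresh(uint64_t *mstamp)
{
    uint64_t val = clock_us();

    if (val > *mstamp)
        *mstamp = val;
}

/* Saturating delta, like tcp_stamp_us_delta(): never negative. */
static uint64_t stamp_us_delta(uint64_t t1, uint64_t t0)
{
    return t1 > t0 ? t1 - t0 : 0;
}

int main(void)
{
    uint64_t mstamp = 0;

    mstamp_refresh(&mstamp);
    uint64_t start = mstamp;

    mstamp_refresh(&mstamp);
    printf("elapsed: %" PRIu64 " us\n", stamp_us_delta(mstamp, start));
    return 0;
}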
+298,10 @@ void x25_check_rbuf(struct sock *); /* sysctl_net_x25.c */ #ifdef CONFIG_SYSCTL -void x25_register_sysctl(void); +int x25_register_sysctl(void); void x25_unregister_sysctl(void); #else -static inline void x25_register_sysctl(void) {}; +static inline int x25_register_sysctl(void) { return 0; }; static inline void x25_unregister_sysctl(void) {}; #endif /* CONFIG_SYSCTL */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6793a30c66b1..7e7e2b0d2915 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -979,10 +979,6 @@ struct xfrm_dst { struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; -#ifdef CONFIG_XFRM_SUB_POLICY - struct flowi *origin; - struct xfrm_selector *partner; -#endif u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; @@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); -#ifdef CONFIG_XFRM_SUB_POLICY - kfree(xdst->origin); - xdst->origin = NULL; - kfree(xdst->partner); - xdst->partner = NULL; -#endif } #endif diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index f5f70e345318..355b81f4242d 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -158,7 +158,6 @@ enum sa_path_rec_type { }; struct sa_path_rec_ib { - __be64 service_id; __be16 dlid; __be16 slid; u8 raw_traffic; @@ -174,7 +173,6 @@ struct sa_path_rec_roce { }; struct sa_path_rec_opa { - __be64 service_id; __be32 dlid; __be32 slid; u8 raw_traffic; @@ -189,6 +187,7 @@ struct sa_path_rec_opa { struct sa_path_rec { union ib_gid dgid; union ib_gid sgid; + __be64 service_id; /* reserved */ __be32 flow_label; u8 hop_limit; @@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib, ib->ib.dlid = htons(ntohl(opa->opa.dlid)); ib->ib.slid = htons(ntohl(opa->opa.slid)); } - ib->ib.service_id = opa->opa.service_id; + ib->service_id = opa->service_id; ib->ib.raw_traffic = opa->opa.raw_traffic; } @@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa, } opa->opa.slid = slid; opa->opa.dlid = dlid; - opa->opa.service_id = ib->ib.service_id; + opa->service_id = ib->service_id; opa->opa.raw_traffic = ib->ib.raw_traffic; } @@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec) (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); } -static inline void sa_path_set_service_id(struct sa_path_rec *rec, - __be64 service_id) -{ - if (rec->rec_type == SA_PATH_REC_TYPE_IB) - rec->ib.service_id = service_id; - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) - rec->opa.service_id = service_id; -} - static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) { if (rec->rec_type == SA_PATH_REC_TYPE_IB) @@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec, rec->opa.raw_traffic = raw_traffic; } -static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec) -{ - if (rec->rec_type == SA_PATH_REC_TYPE_IB) - return rec->ib.service_id; - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) - return rec->opa.service_id; - return 0; -} - static inline __be32 sa_path_get_slid(struct sa_path_rec *rec) { if (rec->rec_type == SA_PATH_REC_TYPE_IB) diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 585266144329..348c102cb5f6 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -10,9 +10,6 @@ struct ibnl_client_cbs { struct module *module; }; -int ibnl_init(void); -void 
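The ib_sa hunks above move service_id out of the per-type ib/opa sub-structures into the common sa_path_rec, which is what lets the sa_path_set/get_service_id() helpers go away. A sketch of that kind of refactor with invented types: the shared field lives in the outer record, so no per-variant accessor is needed.

#include <stdint.h>
#include <stdio.h>

enum rec_type { REC_TYPE_A, REC_TYPE_B };

struct path_rec {
    enum rec_type type;
    uint64_t service_id;          /* shared by every record type */
    union {                       /* only the truly type-specific fields remain here */
        struct { uint16_t dlid16; } a;
        struct { uint32_t dlid32; } b;
    };
};

int main(void)
{
    struct path_rec rec = { .type = REC_TYPE_B, .service_id = 0x1234 };

    /* No switch on rec.type needed to reach the shared field. */
    printf("service id = 0x%llx\n", (unsigned long long)rec.service_id);
    return 0;
}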
ibnl_cleanup(void); - /** * Add a a client to the list of IB netlink exporters. * @index: Index of the added client @@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, unsigned int group, gfp_t flags); -/** - * Check if there are any listeners to the netlink group - * @group: the netlink group ID - * Returns 0 on success or a negative for no listeners. - */ -int ibnl_chk_listeners(unsigned int group); - #endif /* _RDMA_NETLINK_H */ diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 703a64b4681a..a2dcfb850b9f 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -58,6 +58,8 @@ struct rxrpc_wire_header { #define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */ uint8_t userStatus; /* app-layer defined status */ +#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */ + uint8_t securityIndex; /* security protocol ID */ union { __be16 _rsvd; /* reserved */ diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 0cd4c11479b1..b3d1aff5e8ad 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -668,6 +668,10 @@ struct ucc_slow_pram { #define UCC_FAST_GUMR_CTSS 0x00800000 #define UCC_FAST_GUMR_TXSY 0x00020000 #define UCC_FAST_GUMR_RSYN 0x00010000 +#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000 +#define UCC_FAST_GUMR_SYNL_16 0x0000C000 +#define UCC_FAST_GUMR_SYNL_8 0x00008000 +#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000 #define UCC_FAST_GUMR_RTSM 0x00002000 #define UCC_FAST_GUMR_REVD 0x00000400 #define UCC_FAST_GUMR_ENR 0x00000020 @@ -785,6 +789,11 @@ struct ucc_slow_pram { #define UCC_GETH_UPSMR_SMM 0x00000080 #define UCC_GETH_UPSMR_SGMM 0x00000020 +/* UCC Protocol Specific Mode Register (UPSMR), when used for HDLC */ +#define UCC_HDLC_UPSMR_RTE 0x02000000 +#define UCC_HDLC_UPSMR_BUS 0x00200000 +#define UCC_HDLC_UPSMR_CW8 0x00007000 + /* UCC Transmit On Demand Register (UTODR) */ #define UCC_SLOW_TOD 0x8000 #define UCC_FAST_TOD 0x8000 diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 275581d483dd..5f17fb770477 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -557,6 +557,7 @@ struct iscsi_conn { #define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_CLOSED 2 #define LOGIN_FLAGS_READY 4 +#define LOGIN_FLAGS_INITIAL_PDU 8 unsigned long login_flags; struct delayed_work login_work; struct delayed_work login_cleanup_work; diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 29a3d53a4015..ebe96796027a 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -233,6 +233,7 @@ enum rxrpc_congest_change { EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \ EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \ EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \ + EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \ EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \ E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 2b488565599d..a5f6e819fafd 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -100,4 +100,6 @@ #define SO_COOKIE 57 +#define SCM_TIMESTAMPING_PKTINFO 58 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 94dfa9def355..9b2c10b45733 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -82,6 +82,11 @@ enum bpf_cmd { BPF_PROG_ATTACH, 
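The qe.h hunk above adds UCC_FAST_GUMR_SYNL_MASK plus named values for the SYNL field. A typical mask-based read-modify-write of such a multi-bit register field; the names are shortened here and the "register" is just a plain variable, not real hardware:

#include <stdint.h>
#include <stdio.h>

#define GUMR_SYNL_MASK 0x0000C000u
#define GUMR_SYNL_16   0x0000C000u
#define GUMR_SYNL_8    0x00008000u
#define GUMR_SYNL_AUTO 0x00004000u

/* Replace only the SYNL bits, leaving the rest of the register untouched. */
static uint32_t gumr_set_synl(uint32_t gumr, uint32_t synl)
{
    return (gumr & ~GUMR_SYNL_MASK) | (synl & GUMR_SYNL_MASK);
}

int main(void)
{
    uint32_t gumr = 0x00020000u;   /* pretend some other bits are already set */

    gumr = gumr_set_synl(gumr, GUMR_SYNL_AUTO);
    printf("GUMR = 0x%08x, SYNL field = 0x%08x\n", gumr, gumr & GUMR_SYNL_MASK);
    return 0;
}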
BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_GET_NEXT_ID, + BPF_MAP_GET_NEXT_ID, + BPF_PROG_GET_FD_BY_ID, + BPF_MAP_GET_FD_BY_ID, + BPF_OBJ_GET_INFO_BY_FD, }; enum bpf_map_type { @@ -209,6 +214,21 @@ union bpf_attr { __u32 repeat; __u32 duration; } test; + + struct { /* anonymous struct used by BPF_*_GET_*_ID */ + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + }; + __u32 next_id; + }; + + struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ + __u32 bpf_fd; + __u32 info_len; + __aligned_u64 info; + } info; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -313,8 +333,11 @@ union bpf_attr { * @flags: room for future extensions * Return: 0 on success or negative error * - * u64 bpf_perf_event_read(&map, index) - * Return: Number events read or error code + * u64 bpf_perf_event_read(map, flags) + * read perf event counter value + * @map: pointer to perf_event_array map + * @flags: index of event in the map or bitmask flags + * Return: value of perf event counter read or error code * * int bpf_redirect(ifindex, flags) * redirect to another netdev @@ -328,11 +351,11 @@ union bpf_attr { * @skb: pointer to skb * Return: realm if != 0 * - * int bpf_perf_event_output(ctx, map, index, data, size) + * int bpf_perf_event_output(ctx, map, flags, data, size) * output perf raw sample * @ctx: struct pt_regs* * @map: pointer to perf_event_array map - * @index: index of event in the map + * @flags: index of event in the map or bitmask flags * @data: data on stack to be output as raw data * @size: size of data * Return: 0 on success or negative error @@ -670,4 +693,25 @@ struct xdp_md { __u32 data_end; }; +#define BPF_TAG_SIZE 8 + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[BPF_TAG_SIZE]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __aligned_u64 jited_prog_insns; + __aligned_u64 xlated_prog_insns; +} __attribute__((aligned(8))); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; +} __attribute__((aligned(8))); + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 15ac20382aba..8ed679fe603f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -157,6 +157,7 @@ enum { IFLA_GSO_MAX_SIZE, IFLA_PAD, IFLA_XDP, + IFLA_EVENT, __IFLA_MAX }; @@ -911,4 +912,14 @@ enum { #define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1) +enum { + IFLA_EVENT_NONE, + IFLA_EVENT_REBOOT, /* internal reset / reboot */ + IFLA_EVENT_FEATURES, /* change in offload features */ + IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */ + IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. 
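The bpf uapi additions above introduce ID iteration and info queries (BPF_PROG_GET_NEXT_ID and friends). A minimal sketch that walks the IDs of loaded programs through the bpf() syscall; it assumes headers and a kernel that already carry these commands, plus sufficient privileges, and is illustrative rather than production code:

#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(int cmd, union bpf_attr *attr)
{
    return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
    union bpf_attr attr;
    __u32 id = 0;

    for (;;) {
        memset(&attr, 0, sizeof(attr));
        attr.start_id = id;
        if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr)) {
            if (errno != ENOENT)
                perror("BPF_PROG_GET_NEXT_ID");
            break;                      /* ENOENT: no more programs */
        }
        id = attr.next_id;
        printf("loaded bpf prog id %u\n", id);
    }
    return 0;
}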
arp/ndisc */ + IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */ + IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ +}; + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 464dcca5ed68..3d421d912193 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -9,6 +9,7 @@ #ifndef _NET_TIMESTAMPING_H #define _NET_TIMESTAMPING_H +#include <linux/types.h> #include <linux/socket.h> /* for SO_TIMESTAMPING */ /* SO_TIMESTAMPING gets an integer bit field comprised of these values */ @@ -26,8 +27,10 @@ enum { SOF_TIMESTAMPING_OPT_CMSG = (1<<10), SOF_TIMESTAMPING_OPT_TSONLY = (1<<11), SOF_TIMESTAMPING_OPT_STATS = (1<<12), + SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), + SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_STATS, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -125,6 +128,16 @@ enum hwtstamp_rx_filters { HWTSTAMP_FILTER_PTP_V2_SYNC, /* PTP v2/802.AS1, any layer, Delay_req packet */ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ, + + /* NTP, UDP, all versions and packet modes */ + HWTSTAMP_FILTER_NTP_ALL, +}; + +/* SCM_TIMESTAMPING_PKTINFO control message */ +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; }; #endif /* _NET_TIMESTAMPING_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index d613be3b3239..2055783e6ee9 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -37,6 +37,13 @@ enum { #define TC_ACT_QUEUED 5 #define TC_ACT_REPEAT 6 #define TC_ACT_REDIRECT 7 +#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu" + * and don't further process the frame + * in hardware. For sw path, this is + * equivalent of TC_ACT_STOLEN - drop + * the skb and act like everything + * is alright. + */ /* There is a special kind of actions called "extended actions", * which need a value parameter. 
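SOF_TIMESTAMPING_OPT_PKTINFO above requests an extra SCM_TIMESTAMPING_PKTINFO control message carrying the ingress ifindex and packet length. A minimal sketch that only issues the setsockopt(); actually receiving the cmsg additionally depends on driver support, and the fallback SO_TIMESTAMPING define is there only for older libc headers:

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37   /* asm-generic value, in case the libc headers lack it */
#endif

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    unsigned int flags = SOF_TIMESTAMPING_RX_HARDWARE |
                         SOF_TIMESTAMPING_RAW_HARDWARE |
                         SOF_TIMESTAMPING_OPT_PKTINFO;

    if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
        perror("SO_TIMESTAMPING");
    else
        printf("timestamping with packet info requested\n");
    return 0;
}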
These have a local opcode located in @@ -51,6 +58,7 @@ enum { (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode) #define TC_ACT_JUMP __TC_ACT_EXT(1) +#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2) /* Action type identifiers*/ enum { @@ -450,6 +458,14 @@ enum { TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */ TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */ + TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */ + TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */ + + TCA_FLOWER_KEY_IP_TOS, /* u8 */ + TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */ + TCA_FLOWER_KEY_IP_TTL, /* u8 */ + TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index cce061382e40..564790e854f7 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -278,6 +278,7 @@ enum rt_scope_t { #define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */ #define RTM_F_PREFIX 0x800 /* Prefix addresses */ #define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */ +#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */ /* Reserved table identifiers */ @@ -549,6 +550,7 @@ enum { TCA_STAB, TCA_PAD, TCA_DUMP_INVISIBLE, + TCA_CHAIN, __TCA_MAX }; diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 361297e96f58..576c704e3fb8 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -22,6 +22,9 @@ */ #define USB_MAXCHILDREN 31 +/* See USB 3.1 spec Table 10-5 */ +#define USB_SS_MAXPORTS 15 + /* * Hub request types */
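TC_ACT_GOTO_CHAIN above is an extended action: the opcode sits in the high bits and the target chain index in the low value bits checked by TC_ACT_EXT_CMP(). A simplified stand-alone version of that encoding; the shift and mask here mirror the uapi convention but are spelled out locally for illustration:

#include <stdio.h>

#define ACT_EXT_SHIFT    28
#define ACT_EXT(local)   ((local) << ACT_EXT_SHIFT)
#define ACT_EXT_VAL_MASK ((1 << ACT_EXT_SHIFT) - 1)
#define ACT_EXT_CMP(combined, opcode) \
        (((combined) & ~ACT_EXT_VAL_MASK) == (opcode))

#define ACT_GOTO_CHAIN   ACT_EXT(2)

int main(void)
{
    int verdict = ACT_GOTO_CHAIN | 7;   /* encode "go to chain 7" */

    if (ACT_EXT_CMP(verdict, ACT_GOTO_CHAIN))
        printf("goto chain %d\n", verdict & ACT_EXT_VAL_MASK);
    return 0;
}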