Diffstat (limited to 'include')
-rw-r--r--  include/acpi/processor.h | 20
-rw-r--r--  include/dt-bindings/net/qca-ar803x.h | 13
-rw-r--r--  include/linux/bpf.h | 30
-rw-r--r--  include/linux/bpf_types.h | 1
-rw-r--r--  include/linux/cpufreq.h | 7
-rw-r--r--  include/linux/dynamic_debug.h | 6
-rw-r--r--  include/linux/efi.h | 18
-rw-r--r--  include/linux/export.h | 14
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/firmware/broadcom/tee_bnxt_fw.h | 14
-rw-r--r--  include/linux/fsl/mc.h | 2
-rw-r--r--  include/linux/gfp.h | 23
-rw-r--r--  include/linux/icmp.h | 15
-rw-r--r--  include/linux/icmpv6.h | 14
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/if_team.h | 1
-rw-r--r--  include/linux/if_vlan.h | 11
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 3
-rw-r--r--  include/linux/netdevice.h | 76
-rw-r--r--  include/linux/of_net.h | 7
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/platform_data/dma-imx-sdma.h | 3
-rw-r--r--  include/linux/pm_qos.h | 56
-rw-r--r--  include/linux/security.h | 1
-rw-r--r--  include/linux/skbuff.h | 40
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/linux/stmmac.h | 3
-rw-r--r--  include/linux/sunrpc/bc_xprt.h | 5
-rw-r--r--  include/linux/sxgbe_platform.h | 4
-rw-r--r--  include/linux/sysfs.h | 4
-rw-r--r--  include/linux/u64_stats_sync.h | 51
-rw-r--r--  include/linux/uaccess.h | 16
-rw-r--r--  include/linux/virtio_vsock.h | 1
-rw-r--r--  include/net/act_api.h | 46
-rw-r--r--  include/net/arp.h | 4
-rw-r--r--  include/net/bonding.h | 2
-rw-r--r--  include/net/busy_poll.h | 6
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/devlink.h | 45
-rw-r--r--  include/net/dsa.h | 84
-rw-r--r--  include/net/flow_dissector.h | 23
-rw-r--r--  include/net/fq.h | 2
-rw-r--r--  include/net/fq_impl.h | 4
-rw-r--r--  include/net/gen_stats.h | 6
-rw-r--r--  include/net/hwbm.h | 10
-rw-r--r--  include/net/ip.h | 4
-rw-r--r--  include/net/ip_vs.h | 1
-rw-r--r--  include/net/ndisc.h | 8
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/sch_generic.h | 12
-rw-r--r--  include/net/sock.h | 38
-rw-r--r--  include/net/tcp.h | 12
-rw-r--r--  include/net/vxlan.h | 1
-rw-r--r--  include/net/xdp_sock.h | 51
-rw-r--r--  include/rdma/ib_verbs.h | 2
-rw-r--r--  include/soc/fsl/qman.h | 11
-rw-r--r--  include/sound/simple_card_utils.h | 8
-rw-r--r--  include/trace/events/bridge.h | 12
-rw-r--r--  include/trace/events/btrfs.h | 3
-rw-r--r--  include/uapi/linux/bpf.h | 124
-rw-r--r--  include/uapi/linux/dcbnl.h | 2
-rw-r--r--  include/uapi/linux/fuse.h | 37
-rw-r--r--  include/uapi/linux/gen_stats.h | 5
-rw-r--r--  include/uapi/linux/lwtunnel.h | 41
-rw-r--r--  include/uapi/linux/pkt_cls.h | 5
-rw-r--r--  include/uapi/linux/tipc.h | 1
67 files changed, 800 insertions, 281 deletions
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index f936033cb9e6..47805172e73d 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -232,8 +232,8 @@ struct acpi_processor {
struct acpi_processor_limit limit;
struct thermal_cooling_device *cdev;
struct device *dev; /* Processor device. */
- struct dev_pm_qos_request perflib_req;
- struct dev_pm_qos_request thermal_req;
+ struct freq_qos_request perflib_req;
+ struct freq_qos_request thermal_req;
};
struct acpi_processor_errata {
@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
#ifdef CONFIG_CPU_FREQ
extern bool acpi_processor_cpufreq_init;
void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void)
{
return;
}
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
return;
}
@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
return;
}
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
diff --git a/include/dt-bindings/net/qca-ar803x.h b/include/dt-bindings/net/qca-ar803x.h
new file mode 100644
index 000000000000..9c046c7242ed
--- /dev/null
+++ b/include/dt-bindings/net/qca-ar803x.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Qualcomm Atheros AR803x PHYs
+ */
+
+#ifndef _DT_BINDINGS_QCA_AR803X_H
+#define _DT_BINDINGS_QCA_AR803X_H
+
+#define AR803X_STRENGTH_FULL 0
+#define AR803X_STRENGTH_HALF 1
+#define AR803X_STRENGTH_QUARTER 2
+
+#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 171be30fe0ae..7c7f518811a6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -373,6 +373,11 @@ enum bpf_cgroup_storage_type {
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
struct bpf_prog_stats {
u64 cnt;
u64 nsecs;
@@ -1004,31 +1009,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
}
#endif
-#if defined(CONFIG_XDP_SOCKETS)
-struct xdp_sock;
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
-#else
-struct xdp_sock;
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- return NULL;
-}
-
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(struct bpf_map *map)
-{
-}
-#endif
-
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 36a9c2325176..de14872b01ba 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -26,6 +26,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c57e88e85c41..92d5fdc8154e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -76,8 +77,10 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct dev_pm_qos_request *min_freq_req;
- struct dev_pm_qos_request *max_freq_req;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 6c809440f319..4cf02ecd67de 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
#define dynamic_dev_dbg(dev, fmt, ...) \
do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
#endif
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index bd3837022307..d87acf62958e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1579,9 +1579,22 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map);
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
+static inline
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
- unsigned long *addr);
+ unsigned long *addr)
+{
+ /*
+ * Don't allocate at 0x0. It will confuse code that
+ * checks pointers against NULL. Skip the first 8
+ * bytes so we start at a nice even number.
+ */
+ return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+}
efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1592,7 +1605,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
- unsigned long alignment);
+ unsigned long alignment,
+ unsigned long min_addr);
efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image,
diff --git a/include/linux/export.h b/include/linux/export.h
index 621158ecd2e2..941d075f03d6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -18,8 +18,6 @@ extern struct module __this_module;
#define THIS_MODULE ((struct module *)0)
#endif
-#define NS_SEPARATOR "."
-
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
@@ -48,11 +46,11 @@ extern struct module __this_module;
* absolute relocations that require runtime processing on relocatable
* kernels.
*/
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
" .balign 4 \n" \
- "__ksymtab_" #ns NS_SEPARATOR #sym ": \n" \
+ "__ksymtab_" #sym ": \n" \
" .long " #sym "- . \n" \
" .long __kstrtab_" #sym "- . \n" \
" .long __kstrtabns_" #sym "- . \n" \
@@ -74,16 +72,14 @@ struct kernel_symbol {
int namespace_offset;
};
#else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
- static const struct kernel_symbol __ksymtab_##sym##__##ns \
- asm("__ksymtab_" #ns NS_SEPARATOR #sym) \
+#define __KSYMTAB_ENTRY_NS(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
- asm("__ksymtab_" #sym) \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
= { (unsigned long)&sym, __kstrtab_##sym, NULL }
@@ -115,7 +111,7 @@ struct kernel_symbol {
static const char __kstrtabns_##sym[] \
__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #ns; \
- __KSYMTAB_ENTRY_NS(sym, sec, ns)
+ __KSYMTAB_ENTRY_NS(sym, sec)
#define ___EXPORT_SYMBOL(sym, sec) \
___export_symbol_common(sym, sec); \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 22ebea2e64ea..7a6f8f6f1da4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1103,7 +1103,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
#endif /* CONFIG_BPF_JIT */
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
#define BPF_ANC BIT(15)
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644
index 000000000000..f24c82d6ef73
--- /dev/null
+++ b/include/linux/firmware/broadcom/tee_bnxt_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 975553a9f75d..54d9436600c7 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -403,6 +403,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+
extern struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..61f2f6ff9467 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context. When direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation. The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+ return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+ __GFP_DIRECT_RECLAIM;
+}
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
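The gfpflags_normal_context() helper added above answers "may this allocation touch state owned by %current?". A minimal caller sketch, using a hypothetical example_pick_frag() and loosely modeled on the per-task page_frag case adjusted later in this series:

/* Sketch only: prefer current's page_frag when the gfp context is a
 * normal sleepable one that really belongs to current; fall back to
 * the per-socket frag for reclaim/MEMALLOC (nested) contexts.
 */
static struct page_frag *example_pick_frag(struct sock *sk, gfp_t gfp)
{
	if (gfpflags_normal_context(gfp))
		return &current->task_frag;

	return &sk->sk_frag;
}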
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 2d8aaf7d4b9e..81ca84ce3119 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -20,4 +20,19 @@ static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index a8f888976137..ef1cbb5f454f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -46,4 +46,18 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 2e55e4cdbd8a..a367ead4bf4b 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,6 @@ struct macvlan_dev {
netdev_features_t set_features;
enum macvlan_mode mode;
u16 flags;
- int nest_level;
unsigned int macaddr_count;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 06faa066496f..ec7e4bd07f82 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -223,6 +223,7 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
+ struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 244278d5c222..b05e855f1ddd 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -182,7 +182,6 @@ struct vlan_dev_priv {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int nest_level;
};
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG_ON(!is_vlan_dev(dev));
- return vlan_dev_priv(dev)->nest_level;
-}
#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG();
- return 0;
-}
#endif
/**
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 138c50d5a353..0836fe232f97 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1545,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits {
};
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
- struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
- u8 reserved_at_0[0x40];
};
struct mlx5_ifc_fte_match_param_bits {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3207e0b9ec4e..f857f01234f7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1431,7 +1431,6 @@ struct net_device_ops {
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);
- int (*ndo_get_lock_subclass)(struct net_device *dev);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
@@ -1659,6 +1658,8 @@ enum netdev_priv_flags {
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
+ * @upper_level: Maximum depth level of upper devices.
+ * @lower_level: Maximum depth level of lower devices.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
@@ -1768,9 +1769,13 @@ enum netdev_priv_flags {
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
- *
- * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+ * spinlock
+ * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ * @qdisc_xmit_lock_key: lockdep class annotating
+ * netdev_queue->_xmit_lock spinlock
+ * @addr_list_lock_key: lockdep class annotating
+ * net_device->addr_list_lock spinlock
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -1885,6 +1890,8 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
+ unsigned char upper_level;
+ unsigned char lower_level;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
@@ -2055,8 +2062,10 @@ struct net_device {
#endif
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
- struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
+ struct lock_class_key qdisc_tx_busylock_key;
+ struct lock_class_key qdisc_running_key;
+ struct lock_class_key qdisc_xmit_lock_key;
+ struct lock_class_key addr_list_lock_key;
bool proto_down;
unsigned wol_enabled:1;
};
@@ -2134,23 +2143,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
-#define netdev_lockdep_set_classes(dev) \
-{ \
- static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
- static struct lock_class_key qdisc_xmit_lock_key; \
- static struct lock_class_key dev_addr_list_lock_key; \
- unsigned int i; \
- \
- (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
- lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
- for (i = 0; i < (dev)->num_tx_queues; i++) \
- lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
- &qdisc_xmit_lock_key); \
-}
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -2404,11 +2396,23 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
+
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -3155,6 +3159,7 @@ static inline void netif_stop_queue(struct net_device *dev)
}
void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -4072,16 +4077,6 @@ static inline void netif_addr_lock(struct net_device *dev)
spin_lock(&dev->addr_list_lock);
}
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
- int subclass = SINGLE_DEPTH_NESTING;
-
- if (dev->netdev_ops->ndo_get_lock_subclass)
- subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
- spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
@@ -4342,6 +4337,16 @@ int netdev_master_upper_dev_link(struct net_device *dev,
struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
@@ -4353,7 +4358,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
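The new dev_lstats_add()/dev_lstats_read() pair replaces open-coded pcpu_lstats updates. A sketch of the producer side, roughly the shape a loopback-style xmit path takes (illustrative, not the actual driver code):

static netdev_tx_t example_lstats_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	int len = skb->len;	/* capture before the skb may be freed */

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		dev_lstats_add(dev, len);

	return NETDEV_TX_OK;
}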
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 6aeaea1775e6..71bbfcf3adcd 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -6,15 +6,18 @@
#ifndef __LINUX_OF_NET_H
#define __LINUX_OF_NET_H
+#include <linux/phy.h>
+
#ifdef CONFIG_OF_NET
#include <linux/of.h>
struct net_device;
-extern int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern const void *of_get_mac_address(struct device_node *np);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
-static inline int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np,
+ phy_interface_t *interface)
{
return -ENODEV;
}
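With the signature change, of_get_phy_mode() now returns an error code and hands the mode back through a phy_interface_t pointer. A sketch of the new calling convention (hypothetical wrapper name):

static int example_read_phy_mode(struct device_node *np,
				 phy_interface_t *mode)
{
	int err = of_get_phy_mode(np, mode);

	if (err)
		*mode = PHY_INTERFACE_MODE_NA;	/* or propagate err */

	return err;
}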
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61448c19a132..68ccc5b1913b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -292,7 +292,7 @@ struct pmu {
* -EBUSY -- @event is for this PMU but PMU temporarily unavailable
* -EINVAL -- @event is for this PMU but @event is not valid
* -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
- * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges
+ * -EACCES -- @event is for this PMU, @event is valid, but no privileges
*
* 0 -- @event is for this PMU and valid
*
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 6eaa53cef0bd..30e676b36b24 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
/* End of v2 array */
s32 zcanfd_2_mcu_addr;
s32 zqspi_2_mcu_addr;
+ s32 mcu_2_ecspi_addr;
/* End of v3 array */
+ s32 mcu_2_zqspi_addr;
+ /* End of v4 array */
};
/**
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 222c3e01397c..ebf5ef17cc2a 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -34,8 +34,6 @@ enum pm_qos_flags_status {
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
@@ -54,8 +52,6 @@ struct pm_qos_flags_request {
enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE,
- DEV_PM_QOS_MIN_FREQUENCY,
- DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS,
};
@@ -97,14 +93,10 @@ struct pm_qos_flags {
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
- struct pm_qos_constraints min_frequency;
- struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req;
- struct dev_pm_qos_request *min_frequency_req;
- struct dev_pm_qos_request *max_frequency_req;
};
/* Action requested to pm_qos_update_target */
@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- case DEV_PM_QOS_MIN_FREQUENCY:
- return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- case DEV_PM_QOS_MAX_FREQUENCY:
- return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default:
WARN_ON(1);
return 0;
@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
}
#endif
+#define FREQ_QOS_MIN_DEFAULT_VALUE 0
+#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+ return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+
#endif
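The freq_qos_* API replaces the removed DEV_PM_QOS_MIN/MAX_FREQUENCY request types. A sketch of the intended flow against a cpufreq policy's constraints (illustrative; the function and request names are made up, but this mirrors how the cpufreq/ACPI hunks in this series use it):

static struct freq_qos_request example_max_req;

static int example_cap_max_freq(struct cpufreq_policy *policy, s32 max_khz)
{
	int ret;

	ret = freq_qos_add_request(&policy->constraints, &example_max_req,
				   FREQ_QOS_MAX, max_khz);
	if (ret < 0)
		return ret;

	/* Tighten the cap later; a negative return means an error. */
	ret = freq_qos_update_request(&example_max_req, max_khz / 2);
	if (ret < 0)
		freq_qos_remove_request(&example_max_req);

	return ret;
}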
diff --git a/include/linux/security.h b/include/linux/security.h
index a8d59d612d27..9df7547afc0c 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -105,6 +105,7 @@ enum lockdown_reason {
LOCKDOWN_NONE,
LOCKDOWN_MODULE_SIGNATURE,
LOCKDOWN_DEV_MEM,
+ LOCKDOWN_EFI_TEST,
LOCKDOWN_KEXEC,
LOCKDOWN_HIBERNATION,
LOCKDOWN_PCI_ACCESS,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f7ae12a1d680..dfe02b658829 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1354,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
@@ -1495,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
}
/**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+ return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
* skb_queue_is_last - check if skb is the last entry in the queue
* @list: queue head
* @skb: buffer
@@ -1781,7 +1795,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1847,9 +1861,13 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
+ WRITE_ONCE(newsk->next, next);
+ WRITE_ONCE(newsk->prev, prev);
+ WRITE_ONCE(next->prev, newsk);
+ WRITE_ONCE(prev->next, newsk);
list->qlen++;
}
@@ -1860,11 +1878,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
- first->prev = prev;
- prev->next = first;
+ WRITE_ONCE(first->prev, prev);
+ WRITE_ONCE(prev->next, first);
- last->next = next;
- next->prev = last;
+ WRITE_ONCE(last->next, next);
+ WRITE_ONCE(next->prev, last);
}
/**
@@ -2005,8 +2023,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
- next->prev = prev;
- prev->next = next;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
}
/**
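skb_queue_empty_lockless() and the WRITE_ONCE() conversions above are two halves of the same data-race fix: writers publish queue pointers with WRITE_ONCE() so that lockless readers can test emptiness safely. A reader-side sketch in the shape of a poll handler that does not hold the queue lock (illustrative only):

static __poll_t example_poll_mask(struct sock *sk)
{
	__poll_t mask = 0;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}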
diff --git a/include/linux/socket.h b/include/linux/socket.h
index fc0bed59fc84..4049d9755cf1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -263,7 +263,7 @@ struct ucred {
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
-#define SOMAXCONN 128
+#define SOMAXCONN 4096
/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 86f9464c3f5d..d4bcd9387136 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,6 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
+#include <linux/phy.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -132,7 +133,7 @@ struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
int interface;
- int phy_interface;
+ phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
struct device_node *phylink_node;
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 87d27e13d885..d796058cdff2 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -64,6 +64,11 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
return 0;
}
+static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
+ unsigned int max_reqs)
+{
+}
+
static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
{
return false;
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
index 267369110584..85ec745767bd 100644
--- a/include/linux/sxgbe_platform.h
+++ b/include/linux/sxgbe_platform.h
@@ -10,6 +10,8 @@
#ifndef __SXGBE_PLATFORM_H__
#define __SXGBE_PLATFORM_H__
+#include <linux/phy.h>
+
/* MDC Clock Selection define*/
#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
@@ -38,7 +40,7 @@ struct sxgbe_plat_data {
char *phy_bus_name;
int bus_id;
int phy_addr;
- int interface;
+ phy_interface_t interface;
struct sxgbe_mdio_bus_data *mdio_bus_data;
struct sxgbe_dma_cfg *dma_cfg;
int clk_csr;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 5420817ed317..fa7ee503fb76 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -196,9 +196,9 @@ struct bin_attribute {
.size = _size, \
}
-#define __BIN_ATTR_WO(_name) { \
+#define __BIN_ATTR_WO(_name, _size) { \
.attr = { .name = __stringify(_name), .mode = 0200 }, \
- .store = _name##_store, \
+ .write = _name##_write, \
.size = _size, \
}
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed0..9de5c10293f5 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -40,8 +40,8 @@
* spin_lock_bh(...) or other synchronization to get exclusive access
* ...
* u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
* u64_stats_update_end(&stats->syncp);
*
* While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
*
* do {
* start = u64_stats_fetch_begin(&stats->syncp);
- * tbytes = stats->bytes64; // non atomic operation
- * tpackets = stats->packets64; // non atomic operation
+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation
* } while (u64_stats_fetch_retry(&stats->syncp, start));
*
*
@@ -68,6 +68,49 @@ struct u64_stats_sync {
#endif
};
+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+ local64_t v;
+} u64_stats_t ;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+ u64 v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ p->v++;
+}
+#endif
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
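For the consumer side of the new u64_stats_t type, the fetch_begin/retry template from the comment above applies unchanged; only the reads go through u64_stats_read(). A sketch of a per-cpu aggregation loop, a plausible shape for the dev_lstats_read() declared in netdevice.h earlier in this diff (not necessarily its exact body):

static void example_lstats_read(struct net_device *dev,
				u64 *packets, u64 *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lstats = per_cpu_ptr(dev->lstats, i);
		u64 tpackets, tbytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&lstats->syncp);
			tpackets = u64_stats_read(&lstats->packets);
			tbytes = u64_stats_read(&lstats->bytes);
		} while (u64_stats_fetch_retry_irq(&lstats->syncp, start));

		*packets += tpackets;
		*bytes += tbytes;
	}
}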
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d4ee6e942562..67f016010aad 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -311,6 +311,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
@@ -337,7 +338,22 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count);
+extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 4c7781f4b29b..07875ccc7bb5 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,7 +48,6 @@ struct virtio_vsock_sock {
struct virtio_vsock_pkt {
struct virtio_vsock_hdr hdr;
- struct work_struct work;
struct list_head list;
/* socket refcnt not held, only use for cancellation */
struct vsock_sock *vsk;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b18c699681ca..0495bdc034d2 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -41,6 +41,7 @@ struct tc_action {
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
+ u32 tcfa_flags;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -94,7 +95,7 @@ struct tc_action_ops {
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack);
+ u32 flags, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -154,7 +155,11 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
- int bind, bool cpustats);
+ int bind, bool cpustats, u32 flags);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
@@ -186,6 +191,43 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+
+static inline void tcf_action_update_bstats(struct tc_action *a,
+ struct sk_buff *skb)
+{
+ if (likely(a->cpu_bstats)) {
+ bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ bstats_update(&a->tcfa_bstats, skb);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_drop_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_overlimit_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+ bool drop, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
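The new tcf_action_update_bstats()/tcf_action_inc_drop_qstats() helpers hide the percpu-vs-locked stats distinction from individual actions. A sketch of how an action fast path is expected to use them (illustrative; not lifted from a real action module, and the drop condition is arbitrary):

static int example_action_deliver(struct tc_action *a, struct sk_buff *skb)
{
	tcf_action_update_bstats(a, skb);

	if (skb->len == 0) {
		tcf_action_inc_drop_qstats(a);
		return TC_ACT_SHOT;
	}

	return TC_ACT_PIPE;
}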
diff --git a/include/net/arp.h b/include/net/arp.h
index c8f580a0e6b1..4950191f6b2b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f7fe45689142..1afc125014da 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -203,7 +203,6 @@ struct bonding {
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
- u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
@@ -239,6 +238,7 @@ struct bonding {
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
+ struct lock_class_key stats_lock_key;
};
#define bond_slave_get_rcu(dev) \
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 127a5c4e3699..86e028388bad 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- sk->sk_napi_id = skb->napi_id;
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
sk_rx_queue_set(sk, skb);
}
@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!sk->sk_napi_id)
- sk->sk_napi_id = skb->napi_id;
+ if (!READ_ONCE(sk->sk_napi_id))
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e309cc826b40..5ded77fad7fb 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6597,7 +6597,7 @@ struct cfg80211_roam_info {
* time it is accessed in __cfg80211_roamed() due to delay in scheduling
* rdev->event_work. In case of any failures, the reference is released
* either in cfg80211_roamed() or in __cfg80211_romed(), Otherwise, it will be
- * released while diconneting from the current bss.
+ * released while disconnecting from the current bss.
*/
void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6bf3b9e0595a..8d6b5846822c 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -569,6 +569,21 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
DEVLINK_TRAP_GENERIC_ID_TAIL_DROP,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -607,6 +622,36 @@ enum devlink_trap_group_generic_id {
"ttl_value_is_too_small"
#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \
"tail_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_IP_PACKET \
+ "non_ip"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_DIP_MC_DMAC \
+ "uc_dip_over_mc_dmac"
+#define DEVLINK_TRAP_GENERIC_NAME_DIP_LB \
+ "dip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_MC \
+ "sip_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_LB \
+ "sip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_CORRUPTED_IP_HDR \
+ "ip_header_corrupted"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_SIP_BC \
+ "ipv4_sip_is_limited_bc"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_RESERVED_SCOPE \
+ "ipv6_mc_dip_reserved_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE \
+ "ipv6_mc_dip_interface_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_MTU_ERROR \
+ "mtu_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_UNRESOLVED_NEIGH \
+ "unresolved_neigh"
+#define DEVLINK_TRAP_GENERIC_NAME_RPF \
+ "mc_reverse_path_forwarding"
+#define DEVLINK_TRAP_GENERIC_NAME_REJECT_ROUTE \
+ "reject_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_LPM_UNICAST_MISS \
+ "ipv4_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
+ "ipv6_lpm_miss"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
diff --git a/include/net/dsa.h b/include/net/dsa.h
index e3c14dc3bab9..9507611a41f0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -123,10 +123,8 @@ struct dsa_switch_tree {
/* List of switch ports */
struct list_head ports;
- /*
- * Data for the individual switch chips.
- */
- struct dsa_switch *ds[DSA_MAX_SWITCHES];
+ /* List of DSA links composing the routing table */
+ struct list_head rtable;
};
/* TC matchall action types, only mirroring for now */
@@ -214,6 +212,17 @@ struct dsa_port {
bool setup;
};
+/* TODO: ideally DSA ports would have a single dp->link_dp member,
+ * and no dst->rtable nor this struct dsa_link would be needed,
+ * but this would require some more complex tree walking,
+ * so keep it stupid at the moment and list them all.
+ */
+struct dsa_link {
+ struct dsa_port *dp;
+ struct dsa_port *link_dp;
+ struct list_head list;
+};
+
struct dsa_switch {
bool setup;
@@ -245,13 +254,6 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
-
- /*
* Slave mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
@@ -283,13 +285,13 @@ struct dsa_switch {
static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
struct dsa_switch_tree *dst = ds->dst;
- struct dsa_port *dp = NULL;
+ struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds == ds && dp->index == p)
- break;
+ return dp;
- return dp;
+ return NULL;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -324,6 +326,19 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
+/* Return the local port used to reach an arbitrary switch device */
+static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_link *dl;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
+ return dl->dp->index;
+
+ return ds->num_ports;
+}
+
/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
int port)
@@ -331,7 +346,7 @@ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
if (device == ds->index)
return port;
else
- return ds->rtable[device];
+ return dsa_routing_port(ds, device);
}
/* Return the local port used to reach the dedicated CPU port */
@@ -550,6 +565,45 @@ struct dsa_switch_ops {
*/
netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
struct sk_buff *skb);
+ /* Devlink parameters */
+ int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+};
+
+#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
+ DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \
+ dsa_devlink_param_get, dsa_devlink_param_set, NULL)
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id);
+
+struct dsa_devlink_priv {
+ struct dsa_switch *ds;
};
struct dsa_switch_driver {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 90bd210be060..b1063db63e66 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -4,8 +4,11 @@
#include <linux/types.h>
#include <linux/in6.h>
+#include <linux/siphash.h>
#include <uapi/linux/if_ether.h>
+struct sk_buff;
+
/**
* struct flow_dissector_key_control:
* @thoff: Transport header offset
@@ -156,19 +159,16 @@ struct flow_dissector_key_ports {
/**
* flow_dissector_key_icmp:
- * @ports: type and code of ICMP header
- * icmp: ICMP type (high) and code (low)
* type: ICMP type
* code: ICMP code
+ * id: session identifier
*/
struct flow_dissector_key_icmp {
- union {
- __be16 icmp;
- struct {
- u8 type;
- u8 code;
- };
+ struct {
+ u8 type;
+ u8 code;
};
+ u16 id;
};
/**
@@ -276,12 +276,14 @@ struct flow_keys_basic {
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
- struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_icmp icmp;
+ /* 'addrs' must be the last member */
struct flow_dissector_key_addrs addrs;
};
@@ -315,6 +317,9 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ struct flow_dissector_key_icmp *key_icmp,
+ void *data, int thoff, int hlen);
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
diff --git a/include/net/fq.h b/include/net/fq.h
index d126b5d20261..2ad85e683041 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -69,7 +69,7 @@ struct fq {
struct list_head backlogs;
spinlock_t lock;
u32 flows_cnt;
- u32 perturbation;
+ siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index be40a4b327e3..107c0d700ed6 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -108,7 +108,7 @@ begin:
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+ u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
return reciprocal_scale(hash, fq->flows_cnt);
}
@@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- fq->perturbation = prandom_u32();
+ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ca23860adbb9..1424e02cef90 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,6 +7,12 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+/* Note: this used to be in include/uapi/linux/gen_stats.h */
+struct gnet_stats_basic_packed {
+ __u64 bytes;
+ __u64 packets;
+};
+
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index 81643cf8a1c4..c81444611a22 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -21,9 +21,13 @@ void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
#else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+ unsigned int buf_num)
{ return 0; }
#endif /* CONFIG_HWBM */
#endif /* _HWBM_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 95bb77f95bcc..a2c61c36dc4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
}
struct ip_frag_state {
- struct iphdr *iph;
+ bool DF;
unsigned int hlen;
unsigned int ll_rs;
unsigned int mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
};
void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
- unsigned int mtu, struct ip_frag_state *state);
+ unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
struct ip_frag_state *state);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 93e7a252993d..83be2d93b407 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -889,6 +889,7 @@ struct netns_ipvs {
struct delayed_work defense_work; /* Work handler */
int drop_rate;
int drop_counter;
+ int old_secure_tcp;
atomic_t dropentry;
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b2f715ca0567..b5ebeb3b0de0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
@@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 50a67bd6a434..6a86e49181db 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -468,7 +468,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c5d682992e38..b8ceaf0cd997 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -346,7 +346,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#define __net_initconst __initconst
#endif
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(struct net *net, struct net *peer);
bool peernet_has_id(struct net *net, struct net *peer);
struct net *get_net_ns_by_id(struct net *net, int id);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 637548d54b3e..a8b0a9a4c686 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1286,17 +1286,9 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
- struct gnet_stats_queue *stats = res->qstats;
- int ret;
-
- if (res->ingress)
- ret = netif_receive_skb(skb);
- else
- ret = dev_queue_xmit(skb);
- if (ret && stats)
- qstats_overlimit_inc(res->qstats);
+ return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
}
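
Editor's note: skb_tc_reinsert() above no longer bumps qstats itself; it only reports whether netif_receive_skb()/dev_queue_xmit() succeeded and leaves accounting to the caller. A hedged sketch of a caller reacting to the new return value (whether the real caller uses exactly this qstats helper is an assumption):

	if (skb_tc_reinsert(skb, res)) {
		/* re-injection failed; account for it at the call site */
		if (res->qstats)
			qstats_overlimit_inc(res->qstats);
	}
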
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 380312cc67a9..bd210c78dc9d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -66,7 +66,6 @@
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
-#include <net/smc.h>
#include <net/l3mdev.h>
/*
@@ -860,17 +859,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
static inline void sk_acceptq_removed(struct sock *sk)
{
- sk->sk_ack_backlog--;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}
static inline void sk_acceptq_added(struct sock *sk)
{
- sk->sk_ack_backlog++;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}
/*
@@ -900,11 +899,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
- sk->sk_backlog.head = skb;
+ WRITE_ONCE(sk->sk_backlog.head, skb);
else
sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb;
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
skb->next = NULL;
}
@@ -954,8 +953,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
{
int cpu = raw_smp_processor_id();
- if (unlikely(sk->sk_incoming_cpu != cpu))
- sk->sk_incoming_cpu = cpu;
+ if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+ WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -1940,8 +1939,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline void sk_dst_confirm(struct sock *sk)
{
- if (!sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 1;
+ if (!READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
}
static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
@@ -1951,10 +1950,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
- if (sk && sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 0;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
@@ -2242,12 +2241,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_allow_blocking(sk->sk_allocation))
+ if (gfpflags_normal_context(sk->sk_allocation))
return &current->task_frag;
return &sk->sk_frag;
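
Editor's note: the rewritten comment explains why gfpflags_allow_blocking() is too weak a test here: an allocation performed from memory-reclaim context may blockingly re-enter socket code and reuse current->task_frag while it is mid-update. gfpflags_normal_context() is expected to additionally exclude reclaim/emergency contexts; a sketch of such a check, assuming it keys on __GFP_DIRECT_RECLAIM being set without __GFP_MEMALLOC:

	static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
	{
		/* blocking is allowed and this is not a reclaim/MEMALLOC context */
		return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
			__GFP_DIRECT_RECLAIM;
	}
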
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ab4eb5eb5d07..36f195fb576a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -537,7 +537,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-u64 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -757,10 +757,16 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
+/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+static inline u32 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
- return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(tcp_clock_ns());
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -772,7 +778,7 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
}
/* provide the departure time in us unit */
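
Editor's note: tcp_ns_to_ts() factors the nanosecond-to-TSval conversion into one place so tcp_time_stamp_raw() and tcp_skb_timestamp() share a single divide. Since the TSval clock is millisecond based (TCP_TS_HZ is currently 1000, as the comment notes), the math is simply ns / 1e6; a quick worked example:

	/* NSEC_PER_SEC / TCP_TS_HZ == 1000000000 / 1000 == 1000000 */
	u32 tsval = tcp_ns_to_ts(2500000000ULL);	/* 2.5 s in ns -> TSval 2500 */
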
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 335283dbe9b3..373aadcfea21 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -197,6 +197,7 @@ struct vxlan_rdst {
u8 offloaded:1;
__be32 remote_vni;
u32 remote_ifindex;
+ struct net_device *remote_dev;
struct list_head list;
struct rcu_head rcu;
struct dst_cache dst_cache;
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9398ce7960f..e3780e4b74e1 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,14 @@ struct xdp_umem {
/* Nodes are linked in the struct xdp_sock map_list field, and used to
* track which maps a certain socket resides in.
*/
-struct xsk_map;
+
+struct xsk_map {
+ struct bpf_map map;
+ struct list_head __percpu *flush_list;
+ spinlock_t lock; /* Synchronize map updates */
+ struct xdp_sock *xsk_map[];
+};
+
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
@@ -109,8 +116,6 @@ struct xdp_sock {
struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
@@ -134,6 +139,22 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs);
+void __xsk_map_flush(struct bpf_map *map);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct xdp_sock *xs;
+
+ if (key >= map->max_entries)
+ return NULL;
+
+ xs = READ_ONCE(m->xsk_map[key]);
+ return xs;
+}
static inline u64 xsk_umem_extract_addr(u64 addr)
{
@@ -224,15 +245,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -ENOTSUPP;
}
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
- return -ENOTSUPP;
-}
-
-static inline void xsk_flush(struct xdp_sock *xs)
-{
-}
-
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
@@ -357,6 +369,21 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
return 0;
}
+static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
+ struct xdp_sock *xs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(struct bpf_map *map)
+{
+}
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
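
Editor's note: exposing struct xsk_map and __xsk_map_lookup_elem() lets the XDP redirect fast path resolve the destination socket inline, rather than through the removed xsk_rcv()/xsk_flush() declarations. A rough sketch of how a redirect path might combine the new inlines (control flow and the error code are assumptions):

	struct xdp_sock *xs = __xsk_map_lookup_elem(map, index);

	if (!xs)
		return -EINVAL;			/* nothing bound at this index */

	return __xsk_map_redirect(map, xdp, xs);	/* queue + schedule flush */
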
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6a47ba85c54c..e7e733add99f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -366,7 +366,7 @@ struct ib_tm_caps {
struct ib_cq_init_attr {
unsigned int cqe;
- int comp_vector;
+ u32 comp_vector;
u32 flags;
};
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index aa31c05a103a..cfe00e08e85b 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -32,6 +32,7 @@
#define __FSL_QMAN_H
#include <linux/bitops.h>
+#include <linux/device.h>
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
@@ -915,6 +916,16 @@ u16 qman_affine_channel(int cpu);
struct qman_portal *qman_get_affine_portal(int cpu);
/**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Ensures that devices using the portal are unbound when the portal
+ * itself is unbound.
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
+/**
* qman_p_poll_dqrr - process DQRR (fast-path) entries
* @limit: the maximum number of DQRR entries to process
*
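
Editor's note: qman_start_using_portal() above registers a device link between a portal and its consumer so that unbinding the portal driver also unbinds the devices using it. A hedged sketch of how a client driver's probe might pair it with qman_get_affine_portal() (the error handling shown is an assumption):

	struct qman_portal *p = qman_get_affine_portal(cpu);	/* cpu chosen by the driver */
	int err = qman_start_using_portal(p, &pdev->dev);

	if (err)
		return err;	/* portal unavailable: fail or defer the probe */
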
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 985a5f583de4..31f76b6abf71 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -135,9 +135,9 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
struct link_info *li);
#ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
- char *name,
- struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+ char *name,
+ struct asoc_simple_dai *dai)
{
struct device *dev = simple_priv_to_dev(priv);
@@ -167,7 +167,7 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
}
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
{
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device *dev = simple_priv_to_dev(priv);
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 8ea966448b58..6b200059c2c5 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -95,16 +95,16 @@ TRACE_EVENT(fdb_delete,
TRACE_EVENT(br_fdb_update,
TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user),
+ const unsigned char *addr, u16 vid, unsigned long flags),
- TP_ARGS(br, source, addr, vid, added_by_user),
+ TP_ARGS(br, source, addr, vid, flags),
TP_STRUCT__entry(
__string(br_dev, br->dev->name)
__string(dev, source->dev->name)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, vid)
- __field(bool, added_by_user)
+ __field(unsigned long, flags)
),
TP_fast_assign(
@@ -112,14 +112,14 @@ TRACE_EVENT(br_fdb_update,
__assign_str(dev, source->dev->name);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
- __entry->added_by_user = added_by_user;
+ __entry->flags = flags;
),
- TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+ TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx",
__get_str(br_dev), __get_str(dev), __entry->addr[0],
__entry->addr[1], __entry->addr[2], __entry->addr[3],
__entry->addr[4], __entry->addr[5], __entry->vid,
- __entry->added_by_user)
+ __entry->flags)
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 5df604de4f11..75ae1899452b 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1688,6 +1688,7 @@ TRACE_EVENT(qgroup_update_reserve,
__entry->qgid = qgroup->qgroupid;
__entry->cur_reserved = qgroup->rsv.values[type];
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1710,6 +1711,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TP_fast_assign_btrfs(root->fs_info,
__entry->refroot = root->root_key.objectid;
__entry->diff = diff;
+ __entry->type = type;
),
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1726,7 +1728,6 @@ TRACE_EVENT(qgroup_meta_convert,
TP_STRUCT__entry_btrfs(
__field( u64, refroot )
__field( s64, diff )
- __field( int, type )
),
TP_fast_assign_btrfs(root->fs_info,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4af8b0819a32..df6809a76404 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -173,6 +173,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
};
enum bpf_attach_type {
@@ -199,6 +200,7 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
__MAX_BPF_ATTACH_TYPE
};
@@ -561,10 +563,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -1426,45 +1431,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
- *
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
@@ -2775,6 +2749,72 @@ union bpf_attr {
* restricted to raw_tracepoint bpf programs.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using the **bpf_probe_read_user()** helper here
+ * to read the string instead would require estimating its length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2888,7 +2928,11 @@ union bpf_attr {
FN(sk_storage_delete), \
FN(send_signal), \
FN(tcp_gen_syncookie), \
- FN(skb_output),
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
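
Editor's note: the four probe_read_* additions split the old, ambiguous probe reads into explicit user-space and kernel-space variants. A minimal tracing-program sketch using the user-string variant, assuming the usual libbpf helper headers (the attach point and the way the argument is extracted are illustrative only):

	SEC("kprobe/do_sys_open")
	int trace_open(struct pt_regs *ctx)
	{
		char buf[256];
		const char *name = (const char *)PT_REGS_PARM2(ctx);

		/* the path name lives in user memory -> use the _user_ variant */
		int res = bpf_probe_read_user_str(buf, sizeof(buf), name);

		/* kernel-side data (e.g. task_struct fields) would instead use
		 * bpf_probe_read_kernel() / bpf_probe_read_kernel_str();
		 * res could bound a bpf_perf_event_output() as in the snippet above
		 */
		return 0;
	}
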
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 69df19aa8e72..a791a94013a6 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -286,7 +286,7 @@ struct dcbmsg {
* @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
* @DCB_CMD_SNUMTCS: set the number of traffic classes
* @DCB_CMD_GBCN: get backward congestion notification configuration
- * @DCB_CMD_SBCN: get backward congestion notification configration.
+ * @DCB_CMD_SBCN: set backward congestion notification configuration.
* @DCB_CMD_GAPP: get application protocol configuration
* @DCB_CMD_SAPP: set application protocol configuration
* @DCB_CMD_IEEE_SET: set IEEE 802.1Qaz configuration
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 802b0377a49e..373cada89815 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -38,6 +38,43 @@
*
* Protocol changelog:
*
+ * 7.1:
+ * - add the following messages:
+ * FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ * FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ * FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ * FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ * FUSE_RELEASEDIR
+ * - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ * - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ * - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ * - add FUSE_ACCESS message
+ * - add FUSE_CREATE message
+ * - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ * - add frsize to fuse_kstatfs
+ * - clean up request size limit checking
+ *
+ * 7.5:
+ * - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ * - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ * - add FUSE_INTERRUPT message
+ * - add POSIX file lock support
+ *
+ * 7.8:
+ * - add lock_owner and flags fields to fuse_release_in
+ * - add FUSE_BMAP message
+ * - add FUSE_DESTROY message
+ *
* 7.9:
* - new fuse_getattr_in input argument of GETATTR
* - add lk_flags in fuse_lk_in
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 065408e16a80..852f234f1fd6 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -13,6 +13,7 @@ enum {
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
TCA_STATS_BASIC_HW,
+ TCA_STATS_PKT64,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
@@ -26,10 +27,6 @@ struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u32 packets;
-} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index de696ca12f2c..f6035f737193 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -27,6 +27,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
LWTUNNEL_IP_PAD,
+ LWTUNNEL_IP_OPTS,
__LWTUNNEL_IP_MAX,
};
@@ -41,12 +42,52 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
LWTUNNEL_IP6_PAD,
+ LWTUNNEL_IP6_OPTS,
__LWTUNNEL_IP6_MAX,
};
#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
enum {
+ LWTUNNEL_IP_OPTS_UNSPEC,
+ LWTUNNEL_IP_OPTS_GENEVE,
+ LWTUNNEL_IP_OPTS_VXLAN,
+ LWTUNNEL_IP_OPTS_ERSPAN,
+ __LWTUNNEL_IP_OPTS_MAX,
+};
+
+#define LWTUNNEL_IP_OPTS_MAX (__LWTUNNEL_IP_OPTS_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_GENEVE_UNSPEC,
+ LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ LWTUNNEL_IP_OPT_GENEVE_TYPE,
+ LWTUNNEL_IP_OPT_GENEVE_DATA,
+ __LWTUNNEL_IP_OPT_GENEVE_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_GENEVE_MAX (__LWTUNNEL_IP_OPT_GENEVE_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_VXLAN_UNSPEC,
+ LWTUNNEL_IP_OPT_VXLAN_GBP,
+ __LWTUNNEL_IP_OPT_VXLAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_VXLAN_MAX (__LWTUNNEL_IP_OPT_VXLAN_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_ERSPAN_UNSPEC,
+ LWTUNNEL_IP_OPT_ERSPAN_VER,
+ LWTUNNEL_IP_OPT_ERSPAN_INDEX,
+ LWTUNNEL_IP_OPT_ERSPAN_DIR,
+ LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ __LWTUNNEL_IP_OPT_ERSPAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_ERSPAN_MAX (__LWTUNNEL_IP_OPT_ERSPAN_MAX - 1)
+
+enum {
LWT_BPF_PROG_UNSPEC,
LWT_BPF_PROG_FD,
LWT_BPF_PROG_NAME,
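
Editor's note: the new LWTUNNEL_IP(6)_OPTS attribute carries per-encapsulation options as nested netlink attributes, one nest per tunnel type. A hedged sketch of how a fill routine might emit a geneve option under this layout (the exact payload types, e.g. u16 vs. be16 for the class, and the lack of error handling are assumptions):

	struct nlattr *opts, *geneve;

	opts = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS);
	geneve = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
	nla_put_u16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS, opt_class);
	nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt_type);
	nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, data_len, data);
	nla_nest_end(skb, geneve);
	nla_nest_end(skb, opts);
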
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index a6aa466fac9e..c6ad22f76ede 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -16,9 +16,14 @@ enum {
TCA_ACT_STATS,
TCA_ACT_PAD,
TCA_ACT_COOKIE,
+ TCA_ACT_FLAGS,
__TCA_ACT_MAX
};
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
+ * actions stats.
+ */
+
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 7df026ea6aff..76421b878767 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -191,6 +191,7 @@ struct sockaddr_tipc {
#define TIPC_GROUP_JOIN 135 /* Takes struct tipc_group_req* */
#define TIPC_GROUP_LEAVE 136 /* No argument */
#define TIPC_SOCK_RECVQ_USED 137 /* Default: none (read only) */
+#define TIPC_NODELAY 138 /* Default: false */
/*
* Flag values